Use of org.apache.ignite.internal.binary.BinaryReaderExImpl in project ignite by apache.
In the class ClientUtils, the method cacheConfiguration:
/**
* Deserialize configuration from stream.
*/
ClientCacheConfiguration cacheConfiguration(BinaryInputStream in, ProtocolContext protocolCtx) throws IOException {
    try (BinaryReaderExImpl reader = createBinaryReader(in)) {
        // Do not need length to read data. The protocol defines fixed configuration layout.
        reader.readInt();

        return new ClientCacheConfiguration()
            .setName("TBD") // cache name is to be assigned later
            .setAtomicityMode(CacheAtomicityMode.fromOrdinal(reader.readInt()))
            .setBackups(reader.readInt())
            .setCacheMode(CacheMode.fromOrdinal(reader.readInt()))
            .setCopyOnRead(reader.readBoolean())
            .setDataRegionName(reader.readString())
            .setEagerTtl(reader.readBoolean())
            .setStatisticsEnabled(reader.readBoolean())
            .setGroupName(reader.readString())
            .setDefaultLockTimeout(reader.readLong())
            .setMaxConcurrentAsyncOperations(reader.readInt())
            .setMaxQueryIteratorsCount(reader.readInt())
            .setName(reader.readString())
            .setOnheapCacheEnabled(reader.readBoolean())
            .setPartitionLossPolicy(PartitionLossPolicy.fromOrdinal((byte) reader.readInt()))
            .setQueryDetailMetricsSize(reader.readInt())
            .setQueryParallelism(reader.readInt())
            .setReadFromBackup(reader.readBoolean())
            .setRebalanceBatchSize(reader.readInt())
            .setRebalanceBatchesPrefetchCount(reader.readLong())
            .setRebalanceDelay(reader.readLong())
            .setRebalanceMode(CacheRebalanceMode.fromOrdinal(reader.readInt()))
            .setRebalanceOrder(reader.readInt())
            .setRebalanceThrottle(reader.readLong())
            .setRebalanceTimeout(reader.readLong())
            .setSqlEscapeAll(reader.readBoolean())
            .setSqlIndexMaxInlineSize(reader.readInt())
            .setSqlSchema(reader.readString())
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.fromOrdinal(reader.readInt()))
            .setKeyConfiguration(ClientUtils.collection(in,
                unused -> new CacheKeyConfiguration(reader.readString(), reader.readString())
            ).toArray(new CacheKeyConfiguration[0]))
            .setQueryEntities(ClientUtils.collection(in, unused -> {
                QueryEntity qryEntity = new QueryEntity(reader.readString(), reader.readString())
                    .setTableName(reader.readString())
                    .setKeyFieldName(reader.readString())
                    .setValueFieldName(reader.readString());

                boolean isPrecisionAndScaleSupported = protocolCtx.isFeatureSupported(QUERY_ENTITY_PRECISION_AND_SCALE);

                Collection<QueryField> qryFields = ClientUtils.collection(in, unused2 -> {
                    String name = reader.readString();
                    String typeName = reader.readString();
                    boolean isKey = reader.readBoolean();
                    boolean isNotNull = reader.readBoolean();
                    Object dfltVal = reader.readObject();
                    int precision = isPrecisionAndScaleSupported ? reader.readInt() : -1;
                    int scale = isPrecisionAndScaleSupported ? reader.readInt() : -1;

                    return new QueryField(name, typeName, isKey, isNotNull, dfltVal, precision, scale);
                });

                return qryEntity
                    .setFields(qryFields.stream().collect(
                        Collectors.toMap(QueryField::getName, QueryField::getTypeName, (a, b) -> a, LinkedHashMap::new)))
                    .setKeyFields(qryFields.stream()
                        .filter(QueryField::isKey)
                        .map(QueryField::getName)
                        .collect(Collectors.toCollection(LinkedHashSet::new)))
                    .setNotNullFields(qryFields.stream()
                        .filter(QueryField::isNotNull)
                        .map(QueryField::getName)
                        .collect(Collectors.toSet()))
                    .setDefaultFieldValues(qryFields.stream()
                        .filter(f -> f.getDefaultValue() != null)
                        .collect(Collectors.toMap(QueryField::getName, QueryField::getDefaultValue)))
                    .setFieldsPrecision(qryFields.stream()
                        .filter(f -> f.getPrecision() != -1)
                        .collect(Collectors.toMap(QueryField::getName, QueryField::getPrecision)))
                    .setFieldsScale(qryFields.stream()
                        .filter(f -> f.getScale() != -1)
                        .collect(Collectors.toMap(QueryField::getName, QueryField::getScale)))
                    .setAliases(ClientUtils.collection(in,
                        unused3 -> new SimpleEntry<>(reader.readString(), reader.readString())
                    ).stream().collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue)))
                    .setIndexes(ClientUtils.collection(in, unused4 -> {
                        String name = reader.readString();
                        QueryIndexType type = QueryIndexType.fromOrdinal(reader.readByte());
                        int inlineSize = reader.readInt();

                        LinkedHashMap<String, Boolean> fields = ClientUtils.collection(in,
                            unused5 -> new SimpleEntry<>(reader.readString(), reader.readBoolean())
                        ).stream().collect(Collectors.toMap(
                            SimpleEntry::getKey, SimpleEntry::getValue, (a, b) -> a, LinkedHashMap::new));

                        return new QueryIndex(fields, type).setName(name).setInlineSize(inlineSize);
                    }));
            }).toArray(new QueryEntity[0]))
            .setExpiryPolicy(!protocolCtx.isFeatureSupported(EXPIRY_POLICY) ? null :
                reader.readBoolean()
                    ? new PlatformExpiryPolicy(reader.readLong(), reader.readLong(), reader.readLong())
                    : null);
    }
}
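The chained setters above repeatedly rely on a ClientUtils.collection(in, ...) helper to read length-prefixed sequences (key configurations, query entities, fields, aliases, indexes), which is not shown on this page. The following is a minimal sketch of the assumed pattern, a count prefix followed by per-element reads, with an illustrative signature rather than Ignite's actual internal one.

import java.util.ArrayList;
import java.util.Collection;
import java.util.function.Function;

import org.apache.ignite.internal.binary.streams.BinaryInputStream;

final class CollectionReadSketch {
    /**
     * Sketch of the assumed length-prefixed layout: read an element count, then apply
     * the supplied element reader that many times. The element reader closes over the
     * BinaryReaderExImpl, exactly as the lambdas above do.
     */
    static <T> Collection<T> collection(BinaryInputStream in, Function<Integer, T> readElement) {
        int cnt = in.readInt();

        Collection<T> res = new ArrayList<>(cnt);

        for (int i = 0; i < cnt; i++)
            res.add(readElement.apply(i));

        return res;
    }
}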
Use of org.apache.ignite.internal.binary.BinaryReaderExImpl in project ignite by apache.
In the class ClientClusterGroupImpl, the method requestNodesByIds:
/**
* Requests nodes from the server.
*
* @param nodeIds Node ids.
*/
private Collection<ClusterNode> requestNodesByIds(Collection<UUID> nodeIds) {
    try {
        return ch.service(ClientOperation.CLUSTER_GROUP_GET_NODE_INFO, req -> {
            if (!req.clientChannel().protocolCtx().isFeatureSupported(ProtocolBitmaskFeature.CLUSTER_GROUPS))
                throw new ClientFeatureNotSupportedByServerException(ProtocolBitmaskFeature.CLUSTER_GROUPS);

            req.out().writeInt(nodeIds.size());

            for (UUID nodeId : nodeIds) {
                req.out().writeLong(nodeId.getMostSignificantBits());
                req.out().writeLong(nodeId.getLeastSignificantBits());
            }
        }, res -> {
            try (BinaryReaderExImpl reader = utils.createBinaryReader(res.in())) {
                int nodesCnt = reader.readInt();

                Collection<ClusterNode> nodes = new ArrayList<>();

                for (int i = 0; i < nodesCnt; i++) {
                    ClusterNode node = readClusterNode(reader);

                    cachedNodes.put(node.id(), node);

                    if (projectionFilters.testClientSidePredicates(node))
                        nodes.add(node);
                }

                return nodes;
            } catch (IOException e) {
                throw new ClientError(e);
            }
        });
    } catch (ClientError e) {
        throw new ClientException(e);
    }
}
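The request lambda writes each node id as two longs (most-significant bits first, then least-significant bits). A response-side reader would presumably reconstruct the UUID symmetrically; the sketch below shows that assumed round-trip using plain java.util.UUID and java.io streams, independent of Ignite's own wire helpers.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.UUID;

final class UuidWireSketch {
    /** Writes a UUID as two longs, mirroring the request lambda above. */
    static void write(DataOutput out, UUID id) throws IOException {
        out.writeLong(id.getMostSignificantBits());
        out.writeLong(id.getLeastSignificantBits());
    }

    /** Reads the two longs back in the same order and rebuilds the UUID. */
    static UUID read(DataInput in) throws IOException {
        long msb = in.readLong(); // written first above
        long lsb = in.readLong();

        return new UUID(msb, lsb);
    }
}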
Use of org.apache.ignite.internal.binary.BinaryReaderExImpl in project ignite by apache.
In the class OdbcMessageParser, the method decode:
/**
* {@inheritDoc}
*/
@Override
public ClientListenerRequest decode(ClientMessage msg) {
    assert msg != null;

    BinaryInputStream stream = new BinaryHeapInputStream(msg.payload());

    BinaryReaderExImpl reader = new BinaryReaderExImpl(marsh.context(), stream, ctx.config().getClassLoader(), true);

    byte cmd = reader.readByte();

    ClientListenerRequest res;

    switch (cmd) {
        case OdbcRequest.QRY_EXEC: {
            String schema = reader.readString();
            String sql = reader.readString();
            int paramNum = reader.readInt();

            Object[] params = readParameterRow(reader, paramNum);

            int timeout = 0;

            if (ver.compareTo(OdbcConnectionContext.VER_2_3_2) >= 0)
                timeout = reader.readInt();

            boolean autoCommit = true;

            if (ver.compareTo(OdbcConnectionContext.VER_2_7_0) >= 0)
                autoCommit = reader.readBoolean();

            res = new OdbcQueryExecuteRequest(schema, sql, params, timeout, autoCommit);

            break;
        }

        case OdbcRequest.QRY_EXEC_BATCH: {
            String schema = reader.readString();
            String sql = reader.readString();
            int paramRowLen = reader.readInt();
            int rowNum = reader.readInt();
            boolean last = reader.readBoolean();

            Object[][] params = new Object[rowNum][];

            for (int i = 0; i < rowNum; ++i)
                params[i] = readParameterRow(reader, paramRowLen);

            int timeout = 0;

            if (ver.compareTo(OdbcConnectionContext.VER_2_3_2) >= 0)
                timeout = reader.readInt();

            boolean autoCommit = true;

            if (ver.compareTo(OdbcConnectionContext.VER_2_7_0) >= 0)
                autoCommit = reader.readBoolean();

            res = new OdbcQueryExecuteBatchRequest(schema, sql, last, params, timeout, autoCommit);

            break;
        }

        case OdbcRequest.STREAMING_BATCH: {
            String schema = reader.readString();
            int num = reader.readInt();

            ArrayList<OdbcQuery> queries = new ArrayList<>(num);

            for (int i = 0; i < num; ++i) {
                OdbcQuery qry = new OdbcQuery();

                qry.readBinary(reader);

                queries.add(qry);
            }

            boolean last = reader.readBoolean();
            long order = reader.readLong();

            res = new OdbcStreamingBatchRequest(schema, queries, last, order);

            break;
        }

        case OdbcRequest.QRY_FETCH: {
            long queryId = reader.readLong();
            int pageSize = reader.readInt();

            res = new OdbcQueryFetchRequest(queryId, pageSize);

            break;
        }

        case OdbcRequest.QRY_CLOSE: {
            long queryId = reader.readLong();

            res = new OdbcQueryCloseRequest(queryId);

            break;
        }

        case OdbcRequest.META_COLS: {
            String schema = reader.readString();
            String table = reader.readString();
            String column = reader.readString();

            res = new OdbcQueryGetColumnsMetaRequest(schema, table, column);

            break;
        }

        case OdbcRequest.META_TBLS: {
            String catalog = reader.readString();
            String schema = reader.readString();
            String table = reader.readString();
            String tableType = reader.readString();

            res = new OdbcQueryGetTablesMetaRequest(catalog, schema, table, tableType);

            break;
        }

        case OdbcRequest.META_PARAMS: {
            String schema = reader.readString();
            String sqlQuery = reader.readString();

            res = new OdbcQueryGetParamsMetaRequest(schema, sqlQuery);

            break;
        }

        case OdbcRequest.META_RESULTSET: {
            String schema = reader.readString();
            String sqlQuery = reader.readString();

            res = new OdbcQueryGetResultsetMetaRequest(schema, sqlQuery);

            break;
        }

        case OdbcRequest.MORE_RESULTS: {
            long queryId = reader.readLong();
            int pageSize = reader.readInt();

            res = new OdbcQueryMoreResultsRequest(queryId, pageSize);

            break;
        }

        default:
            throw new IgniteException("Unknown ODBC command: [cmd=" + cmd + ']');
    }

    return res;
}
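Both query-execution branches above delegate per-row parameter decoding to readParameterRow(reader, paramNum), which this page does not show. As a hedged sketch, assuming each parameter row is simply that many consecutive objects in the binary stream (the real parser may treat nulls and type hints differently):

/** Hypothetical sketch of the per-row parameter decoding assumed above; not the actual Ignite implementation. */
private static Object[] readParameterRow(BinaryReaderExImpl reader, int paramNum) {
    Object[] params = new Object[paramNum];

    for (int i = 0; i < paramNum; i++)
        params[i] = reader.readObject(); // each parameter is read as a generic binary object

    return params;
}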
Use of org.apache.ignite.internal.binary.BinaryReaderExImpl in project ignite by apache.
In the class JdbcThinTcpIo, the method sendRequest:
/**
* @param req Request.
* @param cap Initial output stream capacity.
* @return Server response.
* @throws IOException On IO error.
* @throws IgniteCheckedException On error.
*/
@SuppressWarnings("unchecked")
public <R extends JdbcResult> R sendRequest(JdbcRequest req, int cap) throws IOException, IgniteCheckedException {
    BinaryWriterExImpl writer = new BinaryWriterExImpl(null, new BinaryHeapOutputStream(cap), null, null);

    req.writeBinary(writer);

    send(writer.array());

    BinaryReaderExImpl reader = new BinaryReaderExImpl(null, new BinaryHeapInputStream(read()), null, null, false);

    JdbcResponse res = new JdbcResponse();

    res.readBinary(reader);

    if (res.status() != SqlListenerResponse.STATUS_SUCCESS)
        throw new IgniteCheckedException("Error server response: [req=" + req + ", resp=" + res + ']');

    return (R) res.response();
}
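The method serializes the request into a heap buffer, ships the bytes with send(...), and parses the reply from read(); those two socket helpers are not shown on this page. The sketch below illustrates the length-prefixed framing they are assumed to implement, a 4-byte little-endian length followed by the payload. The class name and error handling here are illustrative, not Ignite's actual I/O code.

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;

final class FramingSketch {
    private final BufferedOutputStream out;
    private final BufferedInputStream in;

    FramingSketch(BufferedOutputStream out, BufferedInputStream in) {
        this.out = out;
        this.in = in;
    }

    /** Writes the assumed length prefix, then the message body. */
    void send(byte[] msg) throws IOException {
        writeIntLE(msg.length);
        out.write(msg);
        out.flush();
    }

    /** Reads the length prefix, then loops until the full payload has arrived. */
    byte[] read() throws IOException {
        int len = readIntLE();

        byte[] res = new byte[len];
        int off = 0;

        while (off < len) {
            int n = in.read(res, off, len - off);

            if (n < 0)
                throw new IOException("Connection closed by server.");

            off += n;
        }

        return res;
    }

    private void writeIntLE(int v) throws IOException {
        out.write(v & 0xFF);
        out.write((v >> 8) & 0xFF);
        out.write((v >> 16) & 0xFF);
        out.write((v >> 24) & 0xFF);
    }

    private int readIntLE() throws IOException {
        int b0 = in.read(), b1 = in.read(), b2 = in.read(), b3 = in.read();

        if ((b0 | b1 | b2 | b3) < 0)
            throw new IOException("Connection closed by server.");

        return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
    }
}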
Use of org.apache.ignite.internal.binary.BinaryReaderExImpl in project ignite by apache.
In the class JdbcThinTcpIo, the method sendRequest (single-argument overload):
/**
* @param req Request.
* @return Server response.
* @throws IOException In case of IO error.
*/
@SuppressWarnings("unchecked")
JdbcResponse sendRequest(JdbcRequest req) throws IOException {
    int cap = guessCapacity(req);

    BinaryWriterExImpl writer = new BinaryWriterExImpl(null, new BinaryHeapOutputStream(cap), null, null);

    req.writeBinary(writer);

    send(writer.array());

    BinaryReaderExImpl reader = new BinaryReaderExImpl(null, new BinaryHeapInputStream(read()), null, null, false);

    JdbcResponse res = new JdbcResponse();

    res.readBinary(reader);

    return res;
}
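This overload sizes the output buffer with guessCapacity(req), which is not shown on this page. A minimal sketch of such a heuristic, assuming only that batch-style requests (for example JdbcBatchExecuteRequest) deserve a larger initial buffer; the constants are illustrative, and BinaryHeapOutputStream grows past the initial capacity anyway:

/** Hedged sketch of an initial-capacity heuristic; the real JdbcThinTcpIo.guessCapacity may differ. */
private static int guessCapacity(JdbcRequest req) {
    // Batch requests carry many parameter rows, so start with a bigger buffer;
    // everything else gets a small default that the heap output stream can grow past.
    if (req instanceof JdbcBatchExecuteRequest)
        return 8 * 1024;

    return 256;
}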