Use of org.apache.ignite.internal.client.thin.ProtocolVersionFeature.EXPIRY_POLICY in project ignite by apache.
The class ClientUtils, method cacheConfiguration (deserialization).
/**
 * Deserialize configuration from stream.
 */
ClientCacheConfiguration cacheConfiguration(BinaryInputStream in, ProtocolContext protocolCtx) throws IOException {
    try (BinaryReaderExImpl reader = createBinaryReader(in)) {
        // Do not need length to read data. The protocol defines fixed configuration layout.
        reader.readInt();

        return new ClientCacheConfiguration()
            .setName("TBD") // cache name is to be assigned later
            .setAtomicityMode(CacheAtomicityMode.fromOrdinal(reader.readInt()))
            .setBackups(reader.readInt())
            .setCacheMode(CacheMode.fromOrdinal(reader.readInt()))
            .setCopyOnRead(reader.readBoolean())
            .setDataRegionName(reader.readString())
            .setEagerTtl(reader.readBoolean())
            .setStatisticsEnabled(reader.readBoolean())
            .setGroupName(reader.readString())
            .setDefaultLockTimeout(reader.readLong())
            .setMaxConcurrentAsyncOperations(reader.readInt())
            .setMaxQueryIteratorsCount(reader.readInt())
            .setName(reader.readString())
            .setOnheapCacheEnabled(reader.readBoolean())
            .setPartitionLossPolicy(PartitionLossPolicy.fromOrdinal((byte) reader.readInt()))
            .setQueryDetailMetricsSize(reader.readInt())
            .setQueryParallelism(reader.readInt())
            .setReadFromBackup(reader.readBoolean())
            .setRebalanceBatchSize(reader.readInt())
            .setRebalanceBatchesPrefetchCount(reader.readLong())
            .setRebalanceDelay(reader.readLong())
            .setRebalanceMode(CacheRebalanceMode.fromOrdinal(reader.readInt()))
            .setRebalanceOrder(reader.readInt())
            .setRebalanceThrottle(reader.readLong())
            .setRebalanceTimeout(reader.readLong())
            .setSqlEscapeAll(reader.readBoolean())
            .setSqlIndexMaxInlineSize(reader.readInt())
            .setSqlSchema(reader.readString())
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.fromOrdinal(reader.readInt()))
            // Key configurations: (type name, affinity key field name) pairs.
            .setKeyConfiguration(ClientUtils.collection(in,
                unused -> new CacheKeyConfiguration(reader.readString(), reader.readString())
            ).toArray(new CacheKeyConfiguration[0]))
            .setQueryEntities(ClientUtils.collection(in, unused -> {
                QueryEntity qryEntity = new QueryEntity(reader.readString(), reader.readString())
                    .setTableName(reader.readString())
                    .setKeyFieldName(reader.readString())
                    .setValueFieldName(reader.readString());

                boolean isPrecisionAndScaleSupported = protocolCtx.isFeatureSupported(QUERY_ENTITY_PRECISION_AND_SCALE);

                Collection<QueryField> qryFields = ClientUtils.collection(in, unused2 -> {
                    String name = reader.readString();
                    String typeName = reader.readString();
                    boolean isKey = reader.readBoolean();
                    boolean isNotNull = reader.readBoolean();
                    Object dfltVal = reader.readObject();
                    int precision = isPrecisionAndScaleSupported ? reader.readInt() : -1;
                    int scale = isPrecisionAndScaleSupported ? reader.readInt() : -1;

                    return new QueryField(name, typeName, isKey, isNotNull, dfltVal, precision, scale);
                });

                return qryEntity
                    .setFields(qryFields.stream().collect(Collectors.toMap(QueryField::getName, QueryField::getTypeName, (a, b) -> a, LinkedHashMap::new)))
                    .setKeyFields(qryFields.stream().filter(QueryField::isKey).map(QueryField::getName).collect(Collectors.toCollection(LinkedHashSet::new)))
                    .setNotNullFields(qryFields.stream().filter(QueryField::isNotNull).map(QueryField::getName).collect(Collectors.toSet()))
                    .setDefaultFieldValues(qryFields.stream().filter(f -> f.getDefaultValue() != null).collect(Collectors.toMap(QueryField::getName, QueryField::getDefaultValue)))
                    .setFieldsPrecision(qryFields.stream().filter(f -> f.getPrecision() != -1).collect(Collectors.toMap(QueryField::getName, QueryField::getPrecision)))
                    .setFieldsScale(qryFields.stream().filter(f -> f.getScale() != -1).collect(Collectors.toMap(QueryField::getName, QueryField::getScale)))
                    .setAliases(ClientUtils.collection(in,
                        unused3 -> new SimpleEntry<>(reader.readString(), reader.readString())
                    ).stream().collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue)))
                    .setIndexes(ClientUtils.collection(in, unused4 -> {
                        String name = reader.readString();
                        QueryIndexType type = QueryIndexType.fromOrdinal(reader.readByte());
                        int inlineSize = reader.readInt();

                        LinkedHashMap<String, Boolean> fields = ClientUtils.collection(in,
                            unused5 -> new SimpleEntry<>(reader.readString(), reader.readBoolean())
                        ).stream().collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue, (a, b) -> a, LinkedHashMap::new));

                        return new QueryIndex(fields, type).setName(name).setInlineSize(inlineSize);
                    }));
            }).toArray(new QueryEntity[0]))
            // Expiry policy: read only when the server supports the EXPIRY_POLICY feature;
            // a boolean "present" flag followed by create/update/access durations.
            .setExpiryPolicy(!protocolCtx.isFeatureSupported(EXPIRY_POLICY) ? null :
                reader.readBoolean()
                    ? new PlatformExpiryPolicy(reader.readLong(), reader.readLong(), reader.readLong())
                    : null);
    }
}
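For context, this deserialization runs on the client side when a cache configuration is fetched from the server, for example via ClientCache.getConfiguration(). The sketch below is illustrative only and not part of ClientUtils: the address 127.0.0.1:10800 and the cache name "myCache" are placeholders. It simply reads the configuration back and inspects the expiry policy, which stays null when the negotiated protocol predates EXPIRY_POLICY.

// Illustrative sketch, not part of ClientUtils: address and cache name are placeholders.
import javax.cache.expiry.ExpiryPolicy;
import org.apache.ignite.Ignition;
import org.apache.ignite.client.ClientCache;
import org.apache.ignite.client.ClientCacheConfiguration;
import org.apache.ignite.client.IgniteClient;
import org.apache.ignite.configuration.ClientConfiguration;

public class ReadCacheConfigurationSketch {
    public static void main(String[] args) throws Exception {
        try (IgniteClient client = Ignition.startClient(
            new ClientConfiguration().setAddresses("127.0.0.1:10800"))) {
            ClientCache<Integer, String> cache = client.cache("myCache");

            // Fetching the configuration makes the client run the deserialization shown above;
            // the expiry policy is only read when the server supports EXPIRY_POLICY.
            ClientCacheConfiguration cfg = cache.getConfiguration();
            ExpiryPolicy expiryPlc = cfg.getExpiryPolicy();

            System.out.println("Expiry policy: " + (expiryPlc == null ? "<none>" : expiryPlc));
        }
    }
}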
Use of org.apache.ignite.internal.client.thin.ProtocolVersionFeature.EXPIRY_POLICY in project ignite by apache.
The class ClientUtils, method cacheConfiguration (serialization).
/**
 * Serialize configuration to stream.
 */
void cacheConfiguration(ClientCacheConfiguration cfg, BinaryOutputStream out, ProtocolContext protocolCtx) {
    try (BinaryRawWriterEx writer = new BinaryWriterExImpl(marsh.context(), out, null, null)) {
        int origPos = out.position();

        // configuration length is to be assigned in the end
        writer.writeInt(0);

        // properties count is to be assigned in the end
        writer.writeShort((short) 0);

        AtomicInteger propCnt = new AtomicInteger(0);

        // Each property is written as a short item code followed by the item value.
        BiConsumer<CfgItem, Consumer<BinaryRawWriter>> itemWriter = (cfgItem, cfgWriter) -> {
            writer.writeShort(cfgItem.code());

            cfgWriter.accept(writer);

            propCnt.incrementAndGet();
        };
        itemWriter.accept(CfgItem.NAME, w -> w.writeString(cfg.getName()));
        itemWriter.accept(CfgItem.CACHE_MODE, w -> w.writeInt(cfg.getCacheMode().ordinal()));
        itemWriter.accept(CfgItem.ATOMICITY_MODE, w -> w.writeInt(cfg.getAtomicityMode().ordinal()));
        itemWriter.accept(CfgItem.BACKUPS, w -> w.writeInt(cfg.getBackups()));
        itemWriter.accept(CfgItem.WRITE_SYNC_MODE, w -> w.writeInt(cfg.getWriteSynchronizationMode().ordinal()));
        itemWriter.accept(CfgItem.READ_FROM_BACKUP, w -> w.writeBoolean(cfg.isReadFromBackup()));
        itemWriter.accept(CfgItem.EAGER_TTL, w -> w.writeBoolean(cfg.isEagerTtl()));
        itemWriter.accept(CfgItem.GROUP_NAME, w -> w.writeString(cfg.getGroupName()));
        itemWriter.accept(CfgItem.DEFAULT_LOCK_TIMEOUT, w -> w.writeLong(cfg.getDefaultLockTimeout()));
        itemWriter.accept(CfgItem.PART_LOSS_POLICY, w -> w.writeInt(cfg.getPartitionLossPolicy().ordinal()));
        itemWriter.accept(CfgItem.REBALANCE_BATCH_SIZE, w -> w.writeInt(cfg.getRebalanceBatchSize()));
        itemWriter.accept(CfgItem.REBALANCE_BATCHES_PREFETCH_COUNT, w -> w.writeLong(cfg.getRebalanceBatchesPrefetchCount()));
        itemWriter.accept(CfgItem.REBALANCE_DELAY, w -> w.writeLong(cfg.getRebalanceDelay()));
        itemWriter.accept(CfgItem.REBALANCE_MODE, w -> w.writeInt(cfg.getRebalanceMode().ordinal()));
        itemWriter.accept(CfgItem.REBALANCE_ORDER, w -> w.writeInt(cfg.getRebalanceOrder()));
        itemWriter.accept(CfgItem.REBALANCE_THROTTLE, w -> w.writeLong(cfg.getRebalanceThrottle()));
        itemWriter.accept(CfgItem.REBALANCE_TIMEOUT, w -> w.writeLong(cfg.getRebalanceTimeout()));
        itemWriter.accept(CfgItem.COPY_ON_READ, w -> w.writeBoolean(cfg.isCopyOnRead()));
        itemWriter.accept(CfgItem.DATA_REGION_NAME, w -> w.writeString(cfg.getDataRegionName()));
        itemWriter.accept(CfgItem.STATS_ENABLED, w -> w.writeBoolean(cfg.isStatisticsEnabled()));
        itemWriter.accept(CfgItem.MAX_ASYNC_OPS, w -> w.writeInt(cfg.getMaxConcurrentAsyncOperations()));
        itemWriter.accept(CfgItem.MAX_QUERY_ITERATORS, w -> w.writeInt(cfg.getMaxQueryIteratorsCount()));
        itemWriter.accept(CfgItem.ONHEAP_CACHE_ENABLED, w -> w.writeBoolean(cfg.isOnheapCacheEnabled()));
        itemWriter.accept(CfgItem.QUERY_METRIC_SIZE, w -> w.writeInt(cfg.getQueryDetailMetricsSize()));
        itemWriter.accept(CfgItem.QUERY_PARALLELISM, w -> w.writeInt(cfg.getQueryParallelism()));
        itemWriter.accept(CfgItem.SQL_ESCAPE_ALL, w -> w.writeBoolean(cfg.isSqlEscapeAll()));
        itemWriter.accept(CfgItem.SQL_IDX_MAX_INLINE_SIZE, w -> w.writeInt(cfg.getSqlIndexMaxInlineSize()));
        itemWriter.accept(CfgItem.SQL_SCHEMA, w -> w.writeString(cfg.getSqlSchema()));
        itemWriter.accept(CfgItem.KEY_CONFIGS, w -> ClientUtils.collection(cfg.getKeyConfiguration(), out, (unused, i) -> {
            w.writeString(i.getTypeName());
            w.writeString(i.getAffinityKeyFieldName());
        }));

        itemWriter.accept(CfgItem.QUERY_ENTITIES, w -> ClientUtils.collection(cfg.getQueryEntities(), out, (unused, e) -> {
            w.writeString(e.getKeyType());
            w.writeString(e.getValueType());
            w.writeString(e.getTableName());
            w.writeString(e.getKeyFieldName());
            w.writeString(e.getValueFieldName());

            ClientUtils.collection(e.getFields().entrySet(), out, (unused2, f) -> {
                QueryField qf = new QueryField(e, f);

                w.writeString(qf.getName());
                w.writeString(qf.getTypeName());
                w.writeBoolean(qf.isKey());
                w.writeBoolean(qf.isNotNull());
                w.writeObject(qf.getDefaultValue());

                if (protocolCtx.isFeatureSupported(QUERY_ENTITY_PRECISION_AND_SCALE)) {
                    w.writeInt(qf.getPrecision());
                    w.writeInt(qf.getScale());
                }
            });

            ClientUtils.collection(e.getAliases().entrySet(), out, (unused3, a) -> {
                w.writeString(a.getKey());
                w.writeString(a.getValue());
            });

            ClientUtils.collection(e.getIndexes(), out, (unused4, i) -> {
                w.writeString(i.getName());
                w.writeByte((byte) i.getIndexType().ordinal());
                w.writeInt(i.getInlineSize());

                ClientUtils.collection(i.getFields().entrySet(), out, (unused5, f) -> {
                    w.writeString(f.getKey());
                    w.writeBoolean(f.getValue());
                });
            });
        }));
        if (protocolCtx.isFeatureSupported(EXPIRY_POLICY)) {
            itemWriter.accept(CfgItem.EXPIRE_POLICY, w -> {
                ExpiryPolicy expiryPlc = cfg.getExpiryPolicy();

                if (expiryPlc == null)
                    w.writeBoolean(false);
                else {
                    w.writeBoolean(true);
                    w.writeLong(convertDuration(expiryPlc.getExpiryForCreation()));
                    w.writeLong(convertDuration(expiryPlc.getExpiryForUpdate()));
                    w.writeLong(convertDuration(expiryPlc.getExpiryForAccess()));
                }
            });
        } else if (cfg.getExpiryPolicy() != null) {
            throw new ClientProtocolError(String.format("Expire policies are not supported by the server " +
                "version %s, required version %s", protocolCtx.version(), EXPIRY_POLICY.verIntroduced()));
        }

        // configuration length
        writer.writeInt(origPos, out.position() - origPos - 4);

        // properties count
        writer.writeInt(origPos + 4, propCnt.get());
    }
}
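The serialization above runs when the thin client sends a cache configuration to the server, typically while creating a cache. Below is a minimal, illustrative sketch of that write path; it is not part of ClientUtils, and the address, cache name and TTL are placeholders. As the else-if branch above shows, a configured expiry policy against a server that predates EXPIRY_POLICY results in a ClientProtocolError rather than the policy being silently dropped.

// Illustrative sketch, not part of ClientUtils: creates a cache whose configuration carries
// a static expiry policy, which is serialized by the EXPIRE_POLICY branch above.
import java.util.concurrent.TimeUnit;
import javax.cache.expiry.CreatedExpiryPolicy;
import javax.cache.expiry.Duration;
import org.apache.ignite.Ignition;
import org.apache.ignite.client.ClientCacheConfiguration;
import org.apache.ignite.client.IgniteClient;
import org.apache.ignite.configuration.ClientConfiguration;

public class CreateCacheWithExpirySketch {
    public static void main(String[] args) throws Exception {
        try (IgniteClient client = Ignition.startClient(
            new ClientConfiguration().setAddresses("127.0.0.1:10800"))) {
            ClientCacheConfiguration cfg = new ClientCacheConfiguration()
                .setName("sessions")
                // Entries expire 30 minutes after creation (placeholder TTL).
                .setExpiryPolicy(new CreatedExpiryPolicy(new Duration(TimeUnit.MINUTES, 30)));

            // Serializing this configuration exercises the itemWriter path above; against an
            // older server the EXPIRY_POLICY feature check fails and ClientProtocolError is thrown.
            client.getOrCreateCache(cfg);
        }
    }
}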