Example usage of org.apache.ignite.cache.QueryEntity in the Apache Ignite project, taken from the class ClientCacheConfigurationTest, method testSerialization.
/**
 * Verifies that a fully populated {@link ClientCacheConfiguration} survives a
 * round-trip through standard Java serialization unchanged.
 */
@Test
public void testSerialization() throws IOException, ClassNotFoundException {
    // Query fields in deterministic (insertion) order.
    LinkedHashMap<String, String> fields = Stream.of(
            new SimpleEntry<>("id", Integer.class.getName()),
            new SimpleEntry<>("orgId", Integer.class.getName()))
        .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue, (a, b) -> a, LinkedHashMap::new));

    QueryEntity qryEntity = new QueryEntity(int.class.getName(), "Employee")
        .setTableName("EMPLOYEE")
        .setFields(fields)
        .setKeyFields(Collections.singleton("id"))
        .setNotNullFields(Collections.singleton("id"))
        .setDefaultFieldValues(Collections.singletonMap("id", 0))
        .setIndexes(Collections.singletonList(new QueryIndex("id", true, "IDX_EMPLOYEE_ID")))
        .setAliases(Stream.of("id", "orgId").collect(Collectors.toMap(f -> f, String::toUpperCase)));

    // Configuration with every serializable property set to a non-default value.
    ClientCacheConfiguration target = new ClientCacheConfiguration()
        .setName("Person")
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
        .setBackups(3)
        .setCacheMode(CacheMode.PARTITIONED)
        .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
        .setEagerTtl(false)
        .setGroupName("FunctionalTest")
        .setDefaultLockTimeout(12345)
        .setPartitionLossPolicy(PartitionLossPolicy.READ_WRITE_ALL)
        .setReadFromBackup(true)
        .setRebalanceBatchSize(67890)
        .setRebalanceBatchesPrefetchCount(102938)
        .setRebalanceDelay(54321)
        .setRebalanceMode(CacheRebalanceMode.SYNC)
        .setRebalanceOrder(2)
        .setRebalanceThrottle(564738)
        .setRebalanceTimeout(142536)
        .setKeyConfiguration(new CacheKeyConfiguration("Employee", "orgId"))
        .setQueryEntities(qryEntity);

    // Serialize to an in-memory buffer.
    ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
    ObjectOutput out = new ObjectOutputStream(outBytes);
    out.writeObject(target);
    out.flush();

    // Deserialize from the same bytes and compare field-by-field.
    ObjectInput in = new ObjectInputStream(new ByteArrayInputStream(outBytes.toByteArray()));
    Object deserialized = in.readObject();

    assertTrue(Comparers.equal(target, deserialized));
}
Example usage of org.apache.ignite.cache.QueryEntity in the Apache Ignite project, taken from the class ClientCacheConfigurationTest, method testDifferentSizeCacheConfiguration.
/**
 * Ignite serialization/deserialization of cache configurations with different sizes.
 * Creates 256 caches whose names grow from 1 to 256 characters and checks that the
 * thin client reads back each configuration with the correct name.
 */
@SuppressWarnings("rawtypes")
@Test
public void testDifferentSizeCacheConfiguration() throws Exception {
    Collection<CacheConfiguration> cacheCfgs = new ArrayList<>();

    // Query entities shared (via the template) by all cache configurations.
    Collection<QueryEntity> qryEntities = new ArrayList<>();

    for (int i = 0; i < 5; i++) {
        qryEntities.add(new QueryEntity(int.class.getName(), "QueryEntity" + i)
            .setTableName("ENTITY" + i)
            .setFields(new LinkedHashMap<>(F.asMap("id", Integer.class.getName(), "name", String.class.getName()))));
    }

    CacheConfiguration<?, ?> cfgTemplate = new CacheConfiguration<>()
        .setGroupName("CacheGroupName")
        .setQueryEntities(qryEntities);

    // Cache names of every length from 1 to 256. Grow a StringBuilder instead of
    // repeated String concatenation, which is accidentally O(n^2) in a loop.
    StringBuilder cacheName = new StringBuilder(256);

    for (int i = 0; i < 256; i++) {
        cacheName.append('a');

        cacheCfgs.add(new CacheConfiguration<>(cfgTemplate).setName(cacheName.toString()));
    }

    startGrid(0).createCaches(cacheCfgs);

    try (IgniteClient client = startClient(0)) {
        for (CacheConfiguration igniteCacheCfg : cacheCfgs) {
            ClientCacheConfiguration clientCacheCfg = client.cache(igniteCacheCfg.getName()).getConfiguration();

            assertEquals(igniteCacheCfg.getName(), clientCacheCfg.getName());
        }
    }
}
Example usage of org.apache.ignite.cache.QueryEntity in the Apache Ignite project, taken from the class ClientUtils, method cacheConfiguration.
/**
 * Deserialize configuration from stream.
 *
 * @param in Binary input stream positioned at a serialized cache configuration.
 * @param protocolCtx Protocol context, used to check which optional protocol
 *        features (QUERY_ENTITY_PRECISION_AND_SCALE, EXPIRY_POLICY) are negotiated.
 * @return Deserialized client cache configuration.
 * @throws IOException If the underlying binary reader fails.
 */
ClientCacheConfiguration cacheConfiguration(BinaryInputStream in, ProtocolContext protocolCtx) throws IOException {
try (BinaryReaderExImpl reader = createBinaryReader(in)) {
// Do not need length to read data. The protocol defines fixed configuration layout.
reader.readInt();
// NOTE: the sequence of reader.read*() calls below must match the fixed field
// order of the protocol layout exactly; reordering any call would shift and
// corrupt every subsequent field.
return // cache name is to be assigned later
// The setName("TBD") placeholder is overwritten later in this same chain by
// setName(reader.readString()) once the name field is reached in the layout.
new ClientCacheConfiguration().setName("TBD").setAtomicityMode(CacheAtomicityMode.fromOrdinal(reader.readInt())).setBackups(reader.readInt()).setCacheMode(CacheMode.fromOrdinal(reader.readInt())).setCopyOnRead(reader.readBoolean()).setDataRegionName(reader.readString()).setEagerTtl(reader.readBoolean()).setStatisticsEnabled(reader.readBoolean()).setGroupName(reader.readString()).setDefaultLockTimeout(reader.readLong()).setMaxConcurrentAsyncOperations(reader.readInt()).setMaxQueryIteratorsCount(reader.readInt()).setName(reader.readString()).setOnheapCacheEnabled(reader.readBoolean()).setPartitionLossPolicy(PartitionLossPolicy.fromOrdinal((byte) reader.readInt())).setQueryDetailMetricsSize(reader.readInt()).setQueryParallelism(reader.readInt()).setReadFromBackup(reader.readBoolean()).setRebalanceBatchSize(reader.readInt()).setRebalanceBatchesPrefetchCount(reader.readLong()).setRebalanceDelay(reader.readLong()).setRebalanceMode(CacheRebalanceMode.fromOrdinal(reader.readInt())).setRebalanceOrder(reader.readInt()).setRebalanceThrottle(reader.readLong()).setRebalanceTimeout(reader.readLong()).setSqlEscapeAll(reader.readBoolean()).setSqlIndexMaxInlineSize(reader.readInt()).setSqlSchema(reader.readString()).setWriteSynchronizationMode(CacheWriteSynchronizationMode.fromOrdinal(reader.readInt())).setKeyConfiguration(ClientUtils.collection(in, unused -> new CacheKeyConfiguration(reader.readString(), reader.readString())).toArray(new CacheKeyConfiguration[0])).setQueryEntities(ClientUtils.collection(in, unused -> {
// Per-entity header: key type, value type, table name, key/value field names.
QueryEntity qryEntity = new QueryEntity(reader.readString(), reader.readString()).setTableName(reader.readString()).setKeyFieldName(reader.readString()).setValueFieldName(reader.readString());
// Precision/scale are only present in the stream when the peer supports them.
boolean isPrecisionAndScaleSupported = protocolCtx.isFeatureSupported(QUERY_ENTITY_PRECISION_AND_SCALE);
// Read the flat per-field records; they are re-grouped into the separate
// QueryEntity maps/sets (fields, keyFields, notNull, defaults, ...) below.
Collection<QueryField> qryFields = ClientUtils.collection(in, unused2 -> {
String name = reader.readString();
String typeName = reader.readString();
boolean isKey = reader.readBoolean();
boolean isNotNull = reader.readBoolean();
Object dfltVal = reader.readObject();
// -1 is the sentinel for "not specified" when the feature is unsupported.
int precision = isPrecisionAndScaleSupported ? reader.readInt() : -1;
int scale = isPrecisionAndScaleSupported ? reader.readInt() : -1;
return new QueryField(name, typeName, isKey, isNotNull, dfltVal, precision, scale);
});
// LinkedHashMap/LinkedHashSet preserve the field order as sent on the wire.
return qryEntity.setFields(qryFields.stream().collect(Collectors.toMap(QueryField::getName, QueryField::getTypeName, (a, b) -> a, LinkedHashMap::new))).setKeyFields(qryFields.stream().filter(QueryField::isKey).map(QueryField::getName).collect(Collectors.toCollection(LinkedHashSet::new))).setNotNullFields(qryFields.stream().filter(QueryField::isNotNull).map(QueryField::getName).collect(Collectors.toSet())).setDefaultFieldValues(qryFields.stream().filter(f -> f.getDefaultValue() != null).collect(Collectors.toMap(QueryField::getName, QueryField::getDefaultValue))).setFieldsPrecision(qryFields.stream().filter(f -> f.getPrecision() != -1).collect(Collectors.toMap(QueryField::getName, QueryField::getPrecision))).setFieldsScale(qryFields.stream().filter(f -> f.getScale() != -1).collect(Collectors.toMap(QueryField::getName, QueryField::getScale))).setAliases(ClientUtils.collection(in, unused3 -> new SimpleEntry<>(reader.readString(), reader.readString())).stream().collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue))).setIndexes(ClientUtils.collection(in, unused4 -> {
// Per-index record: name, type ordinal, inline size, then (field, asc/desc) pairs.
String name = reader.readString();
QueryIndexType type = QueryIndexType.fromOrdinal(reader.readByte());
int inlineSize = reader.readInt();
LinkedHashMap<String, Boolean> fields = ClientUtils.collection(in, unused5 -> new SimpleEntry<>(reader.readString(), reader.readBoolean())).stream().collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue, (a, b) -> a, LinkedHashMap::new));
return new QueryIndex(fields, type).setName(name).setInlineSize(inlineSize);
}));
// Expiry policy trailer exists only when the EXPIRY_POLICY feature is negotiated;
// the boolean flag then says whether a platform expiry policy follows.
}).toArray(new QueryEntity[0])).setExpiryPolicy(!protocolCtx.isFeatureSupported(EXPIRY_POLICY) ? null : reader.readBoolean() ? new PlatformExpiryPolicy(reader.readLong(), reader.readLong(), reader.readLong()) : null);
}
}
Example usage of org.apache.ignite.cache.QueryEntity in the Apache Ignite project, taken from the class GridCacheUtils, method initializeConfigDefaults.
/**
 * Initializes unset cache configuration properties with proper defaults and
 * validates mutually exclusive settings.
 *
 * @param log Logger for configuration warnings.
 * @param cfg Cache configuration to initialize (mutated in place).
 * @param cacheObjCtx Cache object context supplying the default affinity mapper.
 * @throws IgniteCheckedException If configuration is not valid.
 */
public static void initializeConfigDefaults(IgniteLogger log, CacheConfiguration cfg, CacheObjectContext cacheObjCtx) throws IgniteCheckedException {
    if (cfg.getCacheMode() == null) {
        cfg.setCacheMode(DFLT_CACHE_MODE);
    }

    if (cfg.getNodeFilter() == null) {
        cfg.setNodeFilter(CacheConfiguration.ALL_NODES);
    }

    if (cfg.getAffinity() == null) {
        // No affinity configured: pick one matching the cache mode.
        if (cfg.getCacheMode() == PARTITIONED) {
            cfg.setAffinity(new RendezvousAffinityFunction());
        }
        else if (cfg.getCacheMode() == REPLICATED) {
            cfg.setAffinity(new RendezvousAffinityFunction(false, 512));

            cfg.setBackups(Integer.MAX_VALUE);
        }
        else {
            cfg.setAffinity(new LocalAffinityFunction());
        }
    }
    else if (cfg.getCacheMode() == LOCAL && !(cfg.getAffinity() instanceof LocalAffinityFunction)) {
        // A user-provided affinity function is meaningless for a local cache.
        cfg.setAffinity(new LocalAffinityFunction());

        U.warn(log, "AffinityFunction configuration parameter will be ignored for local cache" + " [cacheName=" + U.maskName(cfg.getName()) + ']');
    }

    validateKeyConfigiration(cfg.getGroupName(), cfg.getName(), cfg.getKeyConfiguration(), log, true);

    // Replicated caches keep a copy of every entry on every node.
    if (cfg.getCacheMode() == REPLICATED) {
        cfg.setBackups(Integer.MAX_VALUE);
    }

    if (cfg.getQueryParallelism() > 1 && cfg.getCacheMode() != PARTITIONED) {
        throw new IgniteCheckedException("Segmented indices are supported for PARTITIONED mode only.");
    }

    if (cfg.getAffinityMapper() == null) {
        cfg.setAffinityMapper(cacheObjCtx.defaultAffMapper());
    }

    if (cfg.getRebalanceMode() == null) {
        cfg.setRebalanceMode(ASYNC);
    }

    if (cfg.getAtomicityMode() == null) {
        cfg.setAtomicityMode(CacheConfiguration.DFLT_CACHE_ATOMICITY_MODE);
    }

    if (cfg.getWriteSynchronizationMode() == null) {
        cfg.setWriteSynchronizationMode(PRIMARY_SYNC);
    }

    assert cfg.getWriteSynchronizationMode() != null;

    if (cfg.getCacheStoreFactory() == null) {
        // No explicit store: wrap standalone loader/writer factories into one store.
        Factory<CacheLoader> loaderFactory = cfg.getCacheLoaderFactory();
        Factory<CacheWriter> wrtFactory = cfg.isWriteThrough() ? cfg.getCacheWriterFactory() : null;

        if (loaderFactory != null || wrtFactory != null) {
            cfg.setCacheStoreFactory(new GridCacheLoaderWriterStoreFactory(loaderFactory, wrtFactory));
        }
    }
    else if (cfg.getCacheLoaderFactory() != null) {
        throw new IgniteCheckedException("Cannot set both cache loaded factory and cache store factory " + "for cache: " + U.maskName(cfg.getName()));
    }
    else if (cfg.getCacheWriterFactory() != null) {
        throw new IgniteCheckedException("Cannot set both cache writer factory and cache store factory " + "for cache: " + U.maskName(cfg.getName()));
    }

    Collection<QueryEntity> qryEntities = cfg.getQueryEntities();

    if (!F.isEmpty(qryEntities)) {
        cfg.clearQueryEntities().setQueryEntities(QueryUtils.normalizeQueryEntities(cacheObjCtx.kernalContext(), qryEntities, cfg));
    }
}
Example usage of org.apache.ignite.cache.QueryEntity in the Apache Ignite project, taken from the class AbstractJdbcPojoQuerySelfTest, method getConfiguration.
/** {@inheritDoc} */
@Override
protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
    // Query entity mapping String keys to JdbcTestObject values, with an index on "id".
    QueryEntity qryEntity = new QueryEntity();

    qryEntity.setKeyType("java.lang.String");
    qryEntity.setValueType("org.apache.ignite.internal.JdbcTestObject");
    qryEntity.addQueryField("id", "java.lang.Integer", null);
    qryEntity.addQueryField("testObject", "org.apache.ignite.internal.JdbcTestObject2", null);
    qryEntity.setIndexes(Collections.singletonList(new QueryIndex("id")));

    // Transactional cache with full-sync writes so JDBC queries see committed data.
    CacheConfiguration<?, ?> cacheCfg = defaultCacheConfiguration();

    cacheCfg.setWriteSynchronizationMode(FULL_SYNC);
    cacheCfg.setAtomicityMode(TRANSACTIONAL);
    cacheCfg.setQueryEntities(Collections.singletonList(qryEntity));

    // Enable the connector so the JDBC driver can attach to the node.
    IgniteConfiguration cfg = super.getConfiguration(gridName);

    cfg.setConnectorConfiguration(new ConnectorConfiguration());
    cfg.setCacheConfiguration(cacheCfg);

    return cfg;
}
Aggregations