Use of org.apache.ignite.cache.QueryEntity in project ignite by apache.
The class AbstractSchemaSelfTest, method assertIndexDescriptor.
/**
* Make sure index exists in cache descriptor.
*
* @param node Node.
* @param cacheName Cache name.
* @param tblName Table name.
* @param idxName Index name.
* @param fields Fields.
*/
protected static void assertIndexDescriptor(IgniteEx node, String cacheName, String tblName, String idxName,
    IgniteBiTuple<String, Boolean>... fields) {
    awaitCompletion();

    DynamicCacheDescriptor desc = node.context().cache().cacheDescriptor(cacheName);

    assert desc != null;

    for (QueryEntity entity : desc.schema().entities()) {
        if (F.eq(tblName, entity.getTableName())) {
            for (QueryIndex idx : entity.getIndexes()) {
                if (F.eq(QueryUtils.indexName(entity, idx), idxName)) {
                    LinkedHashMap<String, Boolean> idxFields = idx.getFields();

                    assertEquals(idxFields.size(), fields.length);

                    int i = 0;

                    for (String idxField : idxFields.keySet()) {
                        assertEquals(idxField.toLowerCase(), fields[i].get1().toLowerCase());
                        assertEquals(idxFields.get(idxField), fields[i].get2());

                        i++;
                    }

                    return;
                }
            }
        }
    }

    fail("Index not found [node=" + node.name() + ", cacheName=" + cacheName + ", tblName=" + tblName +
        ", idxName=" + idxName + ']');
}
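For context, a minimal sketch of how a test might drive this helper: declare a QueryEntity with a named index at cache start, then assert that the cache descriptor reflects it. The cache, table and index names, the single-field index, and the node configuration are illustrative assumptions (not taken from the Ignite test suite); standard java.util and Ignite API imports are assumed.

// Hypothetical usage sketch of assertIndexDescriptor().
QueryEntity entity = new QueryEntity(Long.class.getName(), "Person")
    .setTableName("PERSON");

LinkedHashMap<String, String> flds = new LinkedHashMap<>();
flds.put("name", String.class.getName());
entity.setFields(flds);

entity.setIndexes(Collections.singletonList(new QueryIndex("name").setName("PERSON_NAME_IDX")));

IgniteEx node = (IgniteEx)Ignition.start(new IgniteConfiguration().setIgniteInstanceName("srv"));

node.getOrCreateCache(new CacheConfiguration<Long, Object>("persons")
    .setQueryEntities(Collections.singletonList(entity)));

// Index name, field name and ascending flag should match the declaration above.
assertIndexDescriptor(node, "persons", "PERSON", "PERSON_NAME_IDX", F.t("name", true));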
Use of org.apache.ignite.cache.QueryEntity in project ignite by apache.
The class CacheClientBinaryQueryExample, method createEmployeeQueryEntity.
/**
* Create cache type metadata for {@link Employee}.
*
* @return Cache type metadata.
*/
private static QueryEntity createEmployeeQueryEntity() {
    QueryEntity employeeEntity = new QueryEntity();

    employeeEntity.setValueType(Employee.class.getName());
    employeeEntity.setKeyType(EmployeeKey.class.getName());

    LinkedHashMap<String, String> fields = new LinkedHashMap<>();

    fields.put("name", String.class.getName());
    fields.put("salary", Long.class.getName());
    fields.put("addr.zip", Integer.class.getName());
    fields.put("organizationId", Integer.class.getName());
    fields.put("addr.street", String.class.getName());

    employeeEntity.setFields(fields);

    employeeEntity.setIndexes(Arrays.asList(
        new QueryIndex("name"),
        new QueryIndex("salary"),
        new QueryIndex("addr.zip"),
        new QueryIndex("organizationId"),
        new QueryIndex("addr.street", QueryIndexType.FULLTEXT)));

    return employeeEntity;
}
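The entity returned above is typically plugged into a cache configuration. The sketch below shows that wiring plus one SQL query and one text query against the declared indexes; the cache name, the already started ignite instance, and the query literals are assumptions for illustration, not part of the original example.

// Sketch only: "ignite" is assumed to be an already started Ignite instance.
CacheConfiguration<EmployeeKey, Employee> employeeCacheCfg =
    new CacheConfiguration<EmployeeKey, Employee>("EmployeeCache")
        .setQueryEntities(Collections.singletonList(createEmployeeQueryEntity()));

IgniteCache<EmployeeKey, Employee> cache = ignite.getOrCreateCache(employeeCacheCfg);

// SQL over the sorted "salary" index.
List<List<?>> rows = cache.query(new SqlFieldsQuery(
    "select name, salary from Employee where salary > ?").setArgs(1000L)).getAll();

System.out.println(rows);

// Lucene text search over the FULLTEXT-indexed "addr.street" field.
List<Cache.Entry<EmployeeKey, Employee>> hits =
    cache.query(new TextQuery<EmployeeKey, Employee>(Employee.class, "1st")).getAll();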
Use of org.apache.ignite.cache.QueryEntity in project ignite by apache.
The class GridQueryProcessor, method onCacheStart0.
/**
* Create type descriptors from schema and initialize indexing for given cache.<p>
* Use with {@link #busyLock} where appropriate.
* @param cctx Cache context.
* @param schema Initial schema.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings({ "deprecation", "ThrowableResultOfMethodCallIgnored" })
public void onCacheStart0(GridCacheContext<?, ?> cctx, QuerySchema schema) throws IgniteCheckedException {
cctx.shared().database().checkpointReadLock();
try {
synchronized (stateMux) {
boolean escape = cctx.config().isSqlEscapeAll();
String cacheName = cctx.name();
String schemaName = QueryUtils.normalizeSchemaName(cacheName, cctx.config().getSqlSchema());
// Prepare candidates.
List<Class<?>> mustDeserializeClss = new ArrayList<>();
Collection<QueryTypeCandidate> cands = new ArrayList<>();
Collection<QueryEntity> qryEntities = schema.entities();
if (!F.isEmpty(qryEntities)) {
for (QueryEntity qryEntity : qryEntities) {
QueryTypeCandidate cand = QueryUtils.typeForQueryEntity(cacheName, schemaName, cctx, qryEntity, mustDeserializeClss, escape);
cands.add(cand);
}
}
// Ensure that candidates has unique index names.
// Otherwise we will not be able to apply pending operations.
Map<String, QueryTypeDescriptorImpl> tblTypMap = new HashMap<>();
Map<String, QueryTypeDescriptorImpl> idxTypMap = new HashMap<>();
for (QueryTypeCandidate cand : cands) {
QueryTypeDescriptorImpl desc = cand.descriptor();
QueryTypeDescriptorImpl oldDesc = tblTypMap.put(desc.tableName(), desc);
if (oldDesc != null)
throw new IgniteException("Duplicate table name [cache=" + cacheName + ",tblName=" + desc.tableName() + ", type1=" + desc.name() + ", type2=" + oldDesc.name() + ']');
for (String idxName : desc.indexes().keySet()) {
oldDesc = idxTypMap.put(idxName, desc);
if (oldDesc != null)
throw new IgniteException("Duplicate index name [cache=" + cacheName + ",idxName=" + idxName + ", type1=" + desc.name() + ", type2=" + oldDesc.name() + ']');
}
}
// There could be only one in-flight operation for a cache.
for (SchemaOperation op : schemaOps.values()) {
if (F.eq(op.proposeMessage().deploymentId(), cctx.dynamicDeploymentId())) {
if (op.started()) {
SchemaOperationWorker worker = op.manager().worker();
assert !worker.cacheRegistered();
if (!worker.nop()) {
IgniteInternalFuture fut = worker.future();
assert fut.isDone();
if (fut.error() == null) {
SchemaAbstractOperation op0 = op.proposeMessage().operation();
if (op0 instanceof SchemaIndexCreateOperation) {
SchemaIndexCreateOperation opCreate = (SchemaIndexCreateOperation) op0;
QueryTypeDescriptorImpl typeDesc = tblTypMap.get(opCreate.tableName());
assert typeDesc != null;
QueryUtils.processDynamicIndexChange(opCreate.indexName(), opCreate.index(), typeDesc);
} else if (op0 instanceof SchemaIndexDropOperation) {
SchemaIndexDropOperation opDrop = (SchemaIndexDropOperation) op0;
QueryTypeDescriptorImpl typeDesc = idxTypMap.get(opDrop.indexName());
assert typeDesc != null;
QueryUtils.processDynamicIndexChange(opDrop.indexName(), null, typeDesc);
} else if (op0 instanceof SchemaAlterTableAddColumnOperation) {
SchemaAlterTableAddColumnOperation opAddCol = (SchemaAlterTableAddColumnOperation) op0;
QueryTypeDescriptorImpl typeDesc = tblTypMap.get(opAddCol.tableName());
assert typeDesc != null;
processDynamicAddColumn(typeDesc, opAddCol.columns());
} else if (op0 instanceof SchemaAlterTableDropColumnOperation) {
SchemaAlterTableDropColumnOperation opDropCol = (SchemaAlterTableDropColumnOperation) op0;
QueryTypeDescriptorImpl typeDesc = tblTypMap.get(opDropCol.tableName());
assert typeDesc != null;
processDynamicDropColumn(typeDesc, opDropCol.columns());
} else
assert false;
}
}
}
break;
}
}
// Ready to register at this point.
registerCache0(cacheName, schemaName, cctx, cands);
// Warn about possible implicit deserialization.
if (!mustDeserializeClss.isEmpty()) {
U.warnDevOnly(log, "Some classes in query configuration cannot be written in binary format " + "because they either implement Externalizable interface or have writeObject/readObject " + "methods. Instances of these classes will be deserialized in order to build indexes. Please " + "ensure that all nodes have these classes in classpath. To enable binary serialization " + "either implement " + Binarylizable.class.getSimpleName() + " interface or set explicit " + "serializer using BinaryTypeConfiguration.setSerializer() method: " + mustDeserializeClss);
}
}
} finally {
cctx.shared().database().checkpointReadUnlock();
}
}
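To illustrate the uniqueness checks above, here is a hypothetical configuration that this registration path is expected to reject: two query entities in one cache that map to the same SQL table name. The class, cache, and table names are made up for the sketch.

// Hypothetical: both entities resolve to table PERSON within the same cache,
// so starting the cache is expected to fail with a "Duplicate table name" IgniteException.
QueryEntity e1 = new QueryEntity(Integer.class.getName(), "org.example.PersonV1")
    .setTableName("PERSON");
QueryEntity e2 = new QueryEntity(Integer.class.getName(), "org.example.PersonV2")
    .setTableName("PERSON");

CacheConfiguration<Integer, Object> dupCfg = new CacheConfiguration<Integer, Object>("persons")
    .setQueryEntities(Arrays.asList(e1, e2));

// ignite.getOrCreateCache(dupCfg); // expected to throw at registration time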
Use of org.apache.ignite.cache.QueryEntity in project ignite by apache.
The class GridQueryProcessor, method prepareChangeOnNotStartedCache.
/**
* Prepare operation on non-started cache.
*
* @param op Operation.
* @param schema Known cache schema.
* @return Result: nop flag, error.
*/
private T2<Boolean, SchemaOperationException> prepareChangeOnNotStartedCache(SchemaAbstractOperation op,
    QuerySchema schema) {
    boolean nop = false;
    SchemaOperationException err = null;

    // Build table and index maps.
    Map<String, QueryEntity> tblMap = new HashMap<>();
    Map<String, T2<QueryEntity, QueryIndex>> idxMap = new HashMap<>();

    for (QueryEntity entity : schema.entities()) {
        String tblName = entity.getTableName();

        QueryEntity oldEntity = tblMap.put(tblName, entity);

        if (oldEntity != null) {
            err = new SchemaOperationException("Invalid schema state (duplicate table found): " + tblName);

            break;
        }

        for (QueryIndex entityIdx : entity.getIndexes()) {
            String idxName = entityIdx.getName();

            T2<QueryEntity, QueryIndex> oldIdxEntity = idxMap.put(idxName, new T2<>(entity, entityIdx));

            if (oldIdxEntity != null) {
                err = new SchemaOperationException("Invalid schema state (duplicate index found): " + idxName);

                break;
            }
        }

        if (err != null)
            break;
    }

    // Now check whether operation can be applied to schema.
    if (op instanceof SchemaIndexCreateOperation) {
        SchemaIndexCreateOperation op0 = (SchemaIndexCreateOperation) op;

        String idxName = op0.indexName();

        T2<QueryEntity, QueryIndex> oldIdxEntity = idxMap.get(idxName);

        if (oldIdxEntity == null) {
            String tblName = op0.tableName();

            QueryEntity oldEntity = tblMap.get(tblName);

            if (oldEntity == null)
                err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, tblName);
            else {
                for (String fieldName : op0.index().getFields().keySet()) {
                    Set<String> oldEntityFields = new HashSet<>(oldEntity.getFields().keySet());

                    for (Map.Entry<String, String> alias : oldEntity.getAliases().entrySet()) {
                        oldEntityFields.remove(alias.getKey());
                        oldEntityFields.add(alias.getValue());
                    }

                    if (!oldEntityFields.contains(fieldName)) {
                        err = new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, fieldName);

                        break;
                    }
                }
            }
        }
        else {
            if (op0.ifNotExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_INDEX_EXISTS, idxName);
        }
    }
    else if (op instanceof SchemaIndexDropOperation) {
        SchemaIndexDropOperation op0 = (SchemaIndexDropOperation) op;

        String idxName = op0.indexName();

        T2<QueryEntity, QueryIndex> oldIdxEntity = idxMap.get(idxName);

        if (oldIdxEntity == null) {
            if (op0.ifExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, idxName);
        }
    }
    else if (op instanceof SchemaAlterTableAddColumnOperation) {
        SchemaAlterTableAddColumnOperation op0 = (SchemaAlterTableAddColumnOperation) op;

        QueryEntity e = tblMap.get(op0.tableName());

        if (e == null) {
            if (op0.ifTableExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, op0.tableName());
        }
        else {
            for (QueryField fld : op0.columns()) {
                if (e.getFields().containsKey(fld.name())) {
                    if (op0.ifNotExists()) {
                        assert op0.columns().size() == 1;

                        nop = true;
                    }
                    else
                        err = new SchemaOperationException(SchemaOperationException.CODE_COLUMN_EXISTS, fld.name());
                }
            }
        }
    }
    else if (op instanceof SchemaAlterTableDropColumnOperation) {
        SchemaAlterTableDropColumnOperation op0 = (SchemaAlterTableDropColumnOperation) op;

        QueryEntity e = tblMap.get(op0.tableName());

        if (e == null) {
            if (op0.ifTableExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, op0.tableName());
        }
        else {
            Map<String, String> aliases = e.getAliases();

            for (String colName : op0.columns()) {
                if (err != null)
                    break;

                String fldName = colName;

                if (!F.isEmpty(aliases)) {
                    for (Map.Entry<String, String> a : aliases.entrySet()) {
                        if (colName.equals(a.getValue())) {
                            fldName = a.getKey();

                            break;
                        }
                    }
                }

                if (!e.getFields().containsKey(fldName)) {
                    if (op0.ifExists()) {
                        assert op0.columns().size() == 1;

                        nop = true;
                    }
                    else
                        err = new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, fldName);

                    break;
                }

                err = QueryUtils.validateDropColumn(e, fldName, colName);
            }
        }
    }
    else
        err = new SchemaOperationException("Unsupported operation: " + op);

    return new T2<>(nop, err);
}
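The index-create and drop-column branches above translate between QueryEntity field names and their SQL column aliases. Below is a small sketch of an entity where that translation matters; the type, table, field, and alias names are illustrative assumptions.

// Field "orgId" is exposed to SQL as column ORGANIZATION_ID via an alias.
QueryEntity entity = new QueryEntity(Long.class.getName(), "org.example.Employee")
    .setTableName("EMPLOYEE");

LinkedHashMap<String, String> flds = new LinkedHashMap<>();
flds.put("orgId", Integer.class.getName());
entity.setFields(flds);

entity.setAliases(Collections.singletonMap("orgId", "ORGANIZATION_ID"));

// CREATE INDEX ON EMPLOYEE (ORGANIZATION_ID) is validated against the alias value,
// while ALTER TABLE EMPLOYEE DROP COLUMN ORGANIZATION_ID is mapped back to the
// underlying field "orgId" before the existence check runs.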
Use of org.apache.ignite.cache.QueryEntity in project ignite by apache.
The class QuerySchema, method finish.
/**
* Process finish message.
*
* @param msg Message.
*/
public void finish(SchemaFinishDiscoveryMessage msg) {
    synchronized (mux) {
        SchemaAbstractOperation op = msg.operation();

        if (op instanceof SchemaIndexCreateOperation) {
            SchemaIndexCreateOperation op0 = (SchemaIndexCreateOperation) op;

            for (QueryEntity entity : entities) {
                String tblName = entity.getTableName();

                if (F.eq(tblName, op0.tableName())) {
                    boolean exists = false;

                    for (QueryIndex idx : entity.getIndexes()) {
                        if (F.eq(idx.getName(), op0.indexName())) {
                            exists = true;

                            break;
                        }
                    }

                    if (!exists) {
                        List<QueryIndex> idxs = new ArrayList<>(entity.getIndexes());

                        idxs.add(op0.index());

                        entity.setIndexes(idxs);
                    }

                    break;
                }
            }
        }
        else if (op instanceof SchemaIndexDropOperation) {
            SchemaIndexDropOperation op0 = (SchemaIndexDropOperation) op;

            for (QueryEntity entity : entities) {
                Collection<QueryIndex> idxs = entity.getIndexes();

                QueryIndex victim = null;

                for (QueryIndex idx : idxs) {
                    if (F.eq(idx.getName(), op0.indexName())) {
                        victim = idx;

                        break;
                    }
                }

                if (victim != null) {
                    List<QueryIndex> newIdxs = new ArrayList<>(entity.getIndexes());

                    newIdxs.remove(victim);

                    entity.setIndexes(newIdxs);

                    break;
                }
            }
        }
        else if (op instanceof SchemaAlterTableAddColumnOperation) {
            SchemaAlterTableAddColumnOperation op0 = (SchemaAlterTableAddColumnOperation) op;

            int targetIdx = -1;

            for (int i = 0; i < entities.size(); i++) {
                QueryEntity entity = ((List<QueryEntity>) entities).get(i);

                if (F.eq(entity.getTableName(), op0.tableName())) {
                    targetIdx = i;

                    break;
                }
            }

            if (targetIdx == -1)
                return;

            boolean replaceTarget = false;

            QueryEntity target = ((List<QueryEntity>) entities).get(targetIdx);

            for (QueryField field : op0.columns()) {
                target.getFields().put(field.name(), field.typeName());

                if (!field.isNullable()) {
                    if (!(target instanceof QueryEntityEx)) {
                        target = new QueryEntityEx(target);

                        replaceTarget = true;
                    }

                    QueryEntityEx target0 = (QueryEntityEx) target;

                    Set<String> notNullFields = target0.getNotNullFields();

                    if (notNullFields == null) {
                        notNullFields = new HashSet<>();

                        target0.setNotNullFields(notNullFields);
                    }

                    notNullFields.add(field.name());
                }
            }

            if (replaceTarget)
                ((List<QueryEntity>) entities).set(targetIdx, target);
        }
        else {
            assert op instanceof SchemaAlterTableDropColumnOperation;

            SchemaAlterTableDropColumnOperation op0 = (SchemaAlterTableDropColumnOperation) op;

            int targetIdx = -1;

            for (int i = 0; i < entities.size(); i++) {
                QueryEntity entity = ((List<QueryEntity>) entities).get(i);

                if (F.eq(entity.getTableName(), op0.tableName())) {
                    targetIdx = i;

                    break;
                }
            }

            if (targetIdx == -1)
                return;

            QueryEntity entity = ((List<QueryEntity>) entities).get(targetIdx);

            for (String field : op0.columns())
                entity.getFields().remove(field);
        }
    }
}
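The index create/drop branches above never mutate the collection returned by QueryEntity.getIndexes() in place; they copy it, modify the copy, and set it back. The same pattern in isolation, with illustrative table and index names (a sketch, not part of QuerySchema):

QueryEntity entity = new QueryEntity(Long.class.getName(), "org.example.City")
    .setTableName("CITY");

LinkedHashMap<String, String> flds = new LinkedHashMap<>();
flds.put("name", String.class.getName());
entity.setFields(flds);

// Equivalent of the CREATE INDEX branch: copy, append, set back.
List<QueryIndex> idxs = new ArrayList<>(entity.getIndexes());
idxs.add(new QueryIndex("name").setName("CITY_NAME_IDX"));
entity.setIndexes(idxs);

// Equivalent of the DROP INDEX branch: copy, remove by name, set back.
List<QueryIndex> rest = new ArrayList<>(entity.getIndexes());
rest.removeIf(idx -> "CITY_NAME_IDX".equals(idx.getName()));
entity.setIndexes(rest);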