Use of org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexDropOperation in the Apache Ignite project: class GridQueryProcessor, method onCacheStart0.
/**
* Create type descriptors from schema and initialize indexing for given cache.<p>
* Use with {@link #busyLock} where appropriate.
* @param cctx Cache context.
* @param schema Initial schema.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings({ "deprecation", "ThrowableResultOfMethodCallIgnored" })
public void onCacheStart0(GridCacheContext<?, ?> cctx, QuerySchema schema) throws IgniteCheckedException {
    cctx.shared().database().checkpointReadLock();

    try {
        synchronized (stateMux) {
            boolean escape = cctx.config().isSqlEscapeAll();
            String cacheName = cctx.name();
            String schemaName = QueryUtils.normalizeSchemaName(cacheName, cctx.config().getSqlSchema());

            // Prepare candidates.
            List<Class<?>> mustDeserializeClss = new ArrayList<>();
            Collection<QueryTypeCandidate> cands = new ArrayList<>();
            Collection<QueryEntity> qryEntities = schema.entities();

            if (!F.isEmpty(qryEntities)) {
                for (QueryEntity qryEntity : qryEntities) {
                    QueryTypeCandidate cand = QueryUtils.typeForQueryEntity(cacheName, schemaName, cctx, qryEntity,
                        mustDeserializeClss, escape);

                    cands.add(cand);
                }
            }

            // Ensure that candidates have unique index names.
            // Otherwise we will not be able to apply pending operations.
            Map<String, QueryTypeDescriptorImpl> tblTypMap = new HashMap<>();
            Map<String, QueryTypeDescriptorImpl> idxTypMap = new HashMap<>();

            for (QueryTypeCandidate cand : cands) {
                QueryTypeDescriptorImpl desc = cand.descriptor();

                QueryTypeDescriptorImpl oldDesc = tblTypMap.put(desc.tableName(), desc);

                if (oldDesc != null)
                    throw new IgniteException("Duplicate table name [cache=" + cacheName +
                        ",tblName=" + desc.tableName() +
                        ", type1=" + desc.name() + ", type2=" + oldDesc.name() + ']');

                for (String idxName : desc.indexes().keySet()) {
                    oldDesc = idxTypMap.put(idxName, desc);

                    if (oldDesc != null)
                        throw new IgniteException("Duplicate index name [cache=" + cacheName +
                            ",idxName=" + idxName +
                            ", type1=" + desc.name() + ", type2=" + oldDesc.name() + ']');
                }
            }

            // There could be only one in-flight operation for a cache.
            for (SchemaOperation op : schemaOps.values()) {
                if (F.eq(op.proposeMessage().deploymentId(), cctx.dynamicDeploymentId())) {
                    if (op.started()) {
                        SchemaOperationWorker worker = op.manager().worker();

                        assert !worker.cacheRegistered();

                        if (!worker.nop()) {
                            IgniteInternalFuture fut = worker.future();

                            assert fut.isDone();

                            if (fut.error() == null) {
                                SchemaAbstractOperation op0 = op.proposeMessage().operation();

                                if (op0 instanceof SchemaIndexCreateOperation) {
                                    SchemaIndexCreateOperation opCreate = (SchemaIndexCreateOperation) op0;

                                    QueryTypeDescriptorImpl typeDesc = tblTypMap.get(opCreate.tableName());

                                    assert typeDesc != null;

                                    QueryUtils.processDynamicIndexChange(opCreate.indexName(), opCreate.index(), typeDesc);
                                }
                                else if (op0 instanceof SchemaIndexDropOperation) {
                                    SchemaIndexDropOperation opDrop = (SchemaIndexDropOperation) op0;

                                    QueryTypeDescriptorImpl typeDesc = idxTypMap.get(opDrop.indexName());

                                    assert typeDesc != null;

                                    QueryUtils.processDynamicIndexChange(opDrop.indexName(), null, typeDesc);
                                }
                                else if (op0 instanceof SchemaAlterTableAddColumnOperation) {
                                    SchemaAlterTableAddColumnOperation opAddCol = (SchemaAlterTableAddColumnOperation) op0;

                                    QueryTypeDescriptorImpl typeDesc = tblTypMap.get(opAddCol.tableName());

                                    assert typeDesc != null;

                                    processDynamicAddColumn(typeDesc, opAddCol.columns());
                                }
                                else if (op0 instanceof SchemaAlterTableDropColumnOperation) {
                                    SchemaAlterTableDropColumnOperation opDropCol = (SchemaAlterTableDropColumnOperation) op0;

                                    QueryTypeDescriptorImpl typeDesc = tblTypMap.get(opDropCol.tableName());

                                    assert typeDesc != null;

                                    processDynamicDropColumn(typeDesc, opDropCol.columns());
                                }
                                else
                                    assert false;
                            }
                        }
                    }

                    break;
                }
            }

            // Ready to register at this point.
            registerCache0(cacheName, schemaName, cctx, cands);

            // Warn about possible implicit deserialization.
            if (!mustDeserializeClss.isEmpty()) {
                U.warnDevOnly(log, "Some classes in query configuration cannot be written in binary format " +
                    "because they either implement Externalizable interface or have writeObject/readObject " +
                    "methods. Instances of these classes will be deserialized in order to build indexes. Please " +
                    "ensure that all nodes have these classes in classpath. To enable binary serialization " +
                    "either implement " + Binarylizable.class.getSimpleName() + " interface or set explicit " +
                    "serializer using BinaryTypeConfiguration.setSerializer() method: " + mustDeserializeClss);
            }
        }
    }
    finally {
        cctx.shared().database().checkpointReadUnlock();
    }
}
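The replay loop above is the only place where a pending SchemaIndexDropOperation must be resolved without a table name, which is why the method builds both a table-keyed and an index-keyed descriptor map. A minimal sketch of that lookup pattern, assuming only the accessors already shown; the helper name resolveTargetType is hypothetical and is not an actual GridQueryProcessor method:

    // Sketch: resolve the type descriptor a pending schema operation applies to.
    // A drop operation carries only an index name, so it goes through idxTypMap;
    // the other operations carry a table name and go through tblTypMap.
    private static QueryTypeDescriptorImpl resolveTargetType(
        SchemaAbstractOperation op0,
        Map<String, QueryTypeDescriptorImpl> tblTypMap,
        Map<String, QueryTypeDescriptorImpl> idxTypMap
    ) {
        if (op0 instanceof SchemaIndexDropOperation)
            return idxTypMap.get(((SchemaIndexDropOperation) op0).indexName());

        if (op0 instanceof SchemaIndexCreateOperation)
            return tblTypMap.get(((SchemaIndexCreateOperation) op0).tableName());

        if (op0 instanceof SchemaAlterTableAddColumnOperation)
            return tblTypMap.get(((SchemaAlterTableAddColumnOperation) op0).tableName());

        if (op0 instanceof SchemaAlterTableDropColumnOperation)
            return tblTypMap.get(((SchemaAlterTableDropColumnOperation) op0).tableName());

        return null; // Unknown operation kind: nothing to resolve.
    }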
Use of org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexDropOperation in the Apache Ignite project: class GridQueryProcessor, method prepareChangeOnNotStartedCache.
/**
* Prepare operation on non-started cache.
*
* @param op Operation.
* @param schema Known cache schema.
* @return Result: nop flag, error.
*/
private T2<Boolean, SchemaOperationException> prepareChangeOnNotStartedCache(SchemaAbstractOperation op,
    QuerySchema schema) {
    boolean nop = false;
    SchemaOperationException err = null;

    // Build table and index maps.
    Map<String, QueryEntity> tblMap = new HashMap<>();
    Map<String, T2<QueryEntity, QueryIndex>> idxMap = new HashMap<>();

    for (QueryEntity entity : schema.entities()) {
        String tblName = entity.getTableName();

        QueryEntity oldEntity = tblMap.put(tblName, entity);

        if (oldEntity != null) {
            err = new SchemaOperationException("Invalid schema state (duplicate table found): " + tblName);

            break;
        }

        for (QueryIndex entityIdx : entity.getIndexes()) {
            String idxName = entityIdx.getName();

            T2<QueryEntity, QueryIndex> oldIdxEntity = idxMap.put(idxName, new T2<>(entity, entityIdx));

            if (oldIdxEntity != null) {
                err = new SchemaOperationException("Invalid schema state (duplicate index found): " + idxName);

                break;
            }
        }

        if (err != null)
            break;
    }

    // Now check whether operation can be applied to schema.
    if (op instanceof SchemaIndexCreateOperation) {
        SchemaIndexCreateOperation op0 = (SchemaIndexCreateOperation) op;

        String idxName = op0.indexName();

        T2<QueryEntity, QueryIndex> oldIdxEntity = idxMap.get(idxName);

        if (oldIdxEntity == null) {
            String tblName = op0.tableName();

            QueryEntity oldEntity = tblMap.get(tblName);

            if (oldEntity == null)
                err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, tblName);
            else {
                for (String fieldName : op0.index().getFields().keySet()) {
                    Set<String> oldEntityFields = new HashSet<>(oldEntity.getFields().keySet());

                    for (Map.Entry<String, String> alias : oldEntity.getAliases().entrySet()) {
                        oldEntityFields.remove(alias.getKey());
                        oldEntityFields.add(alias.getValue());
                    }

                    if (!oldEntityFields.contains(fieldName)) {
                        err = new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, fieldName);

                        break;
                    }
                }
            }
        }
        else {
            if (op0.ifNotExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_INDEX_EXISTS, idxName);
        }
    }
    else if (op instanceof SchemaIndexDropOperation) {
        SchemaIndexDropOperation op0 = (SchemaIndexDropOperation) op;

        String idxName = op0.indexName();

        T2<QueryEntity, QueryIndex> oldIdxEntity = idxMap.get(idxName);

        if (oldIdxEntity == null) {
            if (op0.ifExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, idxName);
        }
    }
    else if (op instanceof SchemaAlterTableAddColumnOperation) {
        SchemaAlterTableAddColumnOperation op0 = (SchemaAlterTableAddColumnOperation) op;

        QueryEntity e = tblMap.get(op0.tableName());

        if (e == null) {
            if (op0.ifTableExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, op0.tableName());
        }
        else {
            for (QueryField fld : op0.columns()) {
                if (e.getFields().containsKey(fld.name())) {
                    if (op0.ifNotExists()) {
                        assert op0.columns().size() == 1;

                        nop = true;
                    }
                    else
                        err = new SchemaOperationException(SchemaOperationException.CODE_COLUMN_EXISTS, fld.name());
                }
            }
        }
    }
    else if (op instanceof SchemaAlterTableDropColumnOperation) {
        SchemaAlterTableDropColumnOperation op0 = (SchemaAlterTableDropColumnOperation) op;

        QueryEntity e = tblMap.get(op0.tableName());

        if (e == null) {
            if (op0.ifTableExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, op0.tableName());
        }
        else {
            Map<String, String> aliases = e.getAliases();

            for (String colName : op0.columns()) {
                if (err != null)
                    break;

                String fldName = colName;

                if (!F.isEmpty(aliases)) {
                    for (Map.Entry<String, String> a : aliases.entrySet()) {
                        if (colName.equals(a.getValue())) {
                            fldName = a.getKey();

                            break;
                        }
                    }
                }

                if (!e.getFields().containsKey(fldName)) {
                    if (op0.ifExists()) {
                        assert op0.columns().size() == 1;

                        nop = true;
                    }
                    else
                        err = new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, fldName);

                    break;
                }

                err = QueryUtils.validateDropColumn(e, fldName, colName);
            }
        }
    }
    else
        err = new SchemaOperationException("Unsupported operation: " + op);

    return new T2<>(nop, err);
}
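The returned pair is only a verdict; applying or rejecting the operation is left to the caller. A hedged sketch of how such a caller might interpret the result, with a hypothetical method name (handlePrepareResult) and no claim about the actual call site in GridQueryProcessor:

    // Sketch: consuming the (nop, error) pair produced by the validation above.
    private void handlePrepareResult(T2<Boolean, SchemaOperationException> res) throws SchemaOperationException {
        if (res.get2() != null)
            throw res.get2(); // Validation failed: reject the proposed operation.

        if (res.get1())
            return; // No-op (IF [NOT] EXISTS matched the current schema): acknowledge without applying.

        // Otherwise the operation is valid for the known schema and can proceed.
    }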
Use of org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexDropOperation in the Apache Ignite project: class QuerySchema, method finish.
/**
* Process finish message.
*
* @param msg Message.
*/
public void finish(SchemaFinishDiscoveryMessage msg) {
    synchronized (mux) {
        SchemaAbstractOperation op = msg.operation();

        if (op instanceof SchemaIndexCreateOperation) {
            SchemaIndexCreateOperation op0 = (SchemaIndexCreateOperation) op;

            for (QueryEntity entity : entities) {
                String tblName = entity.getTableName();

                if (F.eq(tblName, op0.tableName())) {
                    boolean exists = false;

                    for (QueryIndex idx : entity.getIndexes()) {
                        if (F.eq(idx.getName(), op0.indexName())) {
                            exists = true;

                            break;
                        }
                    }

                    if (!exists) {
                        List<QueryIndex> idxs = new ArrayList<>(entity.getIndexes());

                        idxs.add(op0.index());

                        entity.setIndexes(idxs);
                    }

                    break;
                }
            }
        }
        else if (op instanceof SchemaIndexDropOperation) {
            SchemaIndexDropOperation op0 = (SchemaIndexDropOperation) op;

            for (QueryEntity entity : entities) {
                Collection<QueryIndex> idxs = entity.getIndexes();

                QueryIndex victim = null;

                for (QueryIndex idx : idxs) {
                    if (F.eq(idx.getName(), op0.indexName())) {
                        victim = idx;

                        break;
                    }
                }

                if (victim != null) {
                    List<QueryIndex> newIdxs = new ArrayList<>(entity.getIndexes());

                    newIdxs.remove(victim);

                    entity.setIndexes(newIdxs);

                    break;
                }
            }
        }
        else if (op instanceof SchemaAlterTableAddColumnOperation) {
            SchemaAlterTableAddColumnOperation op0 = (SchemaAlterTableAddColumnOperation) op;

            int targetIdx = -1;

            for (int i = 0; i < entities.size(); i++) {
                QueryEntity entity = ((List<QueryEntity>) entities).get(i);

                if (F.eq(entity.getTableName(), op0.tableName())) {
                    targetIdx = i;

                    break;
                }
            }

            if (targetIdx == -1)
                return;

            boolean replaceTarget = false;

            QueryEntity target = ((List<QueryEntity>) entities).get(targetIdx);

            for (QueryField field : op0.columns()) {
                target.getFields().put(field.name(), field.typeName());

                if (!field.isNullable()) {
                    if (!(target instanceof QueryEntityEx)) {
                        target = new QueryEntityEx(target);

                        replaceTarget = true;
                    }

                    QueryEntityEx target0 = (QueryEntityEx) target;

                    Set<String> notNullFields = target0.getNotNullFields();

                    if (notNullFields == null) {
                        notNullFields = new HashSet<>();

                        target0.setNotNullFields(notNullFields);
                    }

                    notNullFields.add(field.name());
                }
            }

            if (replaceTarget)
                ((List<QueryEntity>) entities).set(targetIdx, target);
        }
        else {
            assert op instanceof SchemaAlterTableDropColumnOperation;

            SchemaAlterTableDropColumnOperation op0 = (SchemaAlterTableDropColumnOperation) op;

            int targetIdx = -1;

            for (int i = 0; i < entities.size(); i++) {
                QueryEntity entity = ((List<QueryEntity>) entities).get(i);

                if (F.eq(entity.getTableName(), op0.tableName())) {
                    targetIdx = i;

                    break;
                }
            }

            if (targetIdx == -1)
                return;

            QueryEntity entity = ((List<QueryEntity>) entities).get(targetIdx);

            for (String field : op0.columns())
                entity.getFields().remove(field);
        }
    }
}
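For the SchemaIndexDropOperation branch, finish() boils down to removing the matching QueryIndex from the owning entity's index list and writing the copy back via setIndexes(). An illustrative standalone helper with a hypothetical name (dropIndexByName), not part of QuerySchema, using only the QueryEntity and QueryIndex accessors shown above:

    // Sketch: remove an index from a QueryEntity by name, as the drop branch above does.
    private static boolean dropIndexByName(QueryEntity entity, String idxName) {
        for (QueryIndex idx : entity.getIndexes()) {
            if (F.eq(idx.getName(), idxName)) {
                List<QueryIndex> newIdxs = new ArrayList<>(entity.getIndexes());

                newIdxs.remove(idx);

                entity.setIndexes(newIdxs); // Write the reduced index list back to the entity.

                return true; // Index found and removed.
            }
        }

        return false; // No index with this name on the entity.
    }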
Use of org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexDropOperation in the Apache Ignite project: class GridQueryProcessor, method processSchemaOperationLocal.
/**
* Process schema operation.
*
* @param op Operation.
* @param type Type descriptor.
* @param depId Cache deployment ID.
* @param cancelTok Cancel token.
* @throws SchemaOperationException If failed.
*/
public void processSchemaOperationLocal(SchemaAbstractOperation op, QueryTypeDescriptorImpl type, IgniteUuid depId,
    IndexRebuildCancelToken cancelTok) throws SchemaOperationException {
    if (log.isDebugEnabled())
        log.debug("Started local index operation [opId=" + op.id() + ']');

    String cacheName = op.cacheName();

    GridCacheContextInfo<?, ?> cacheInfo = null;

    if (op instanceof SchemaAddQueryEntityOperation) {
        GridCacheContext<?, ?> cctx = ctx.cache().context().cacheContext(CU.cacheId(cacheName));

        if (cctx != null)
            cacheInfo = new GridCacheContextInfo<>(cctx, false);
        else
            return;
    }
    else
        cacheInfo = idx.registeredCacheInfo(cacheName);

    if (cacheInfo == null || !F.eq(depId, cacheInfo.dynamicDeploymentId()))
        throw new SchemaOperationException(SchemaOperationException.CODE_CACHE_NOT_FOUND, cacheName);

    try {
        if (op instanceof SchemaIndexCreateOperation) {
            SchemaIndexCreateOperation op0 = (SchemaIndexCreateOperation) op;

            QueryIndexDescriptorImpl idxDesc = QueryUtils.createIndexDescriptor(type, op0.index());

            SchemaIndexCacheVisitor visitor;

            if (cacheInfo.isCacheContextInited()) {
                int buildIdxPoolSize = ctx.config().getBuildIndexThreadPoolSize();
                int parallel = op0.parallel();

                if (parallel > buildIdxPoolSize) {
                    String idxName = op0.indexName();

                    log.warning("Provided parallelism " + parallel + " for creation of index " + idxName +
                        " is greater than the number of index building threads. Will use " + buildIdxPoolSize +
                        " threads to build index. Increase by IgniteConfiguration.setBuildIndexThreadPoolSize" +
                        " and restart the node if you want to use more threads. [tableName=" + op0.tableName() +
                        ", indexName=" + idxName + ", requestedParallelism=" + parallel +
                        ", buildIndexPoolSize=" + buildIdxPoolSize + "]");
                }

                GridFutureAdapter<Void> buildIdxFut = new GridFutureAdapter<>();

                GridCacheContext<?, ?> cacheCtx = cacheInfo.cacheContext();

                visitor = new SchemaIndexCacheVisitorImpl(cacheCtx, cancelTok, buildIdxFut) {
                    /** {@inheritDoc} */
                    @Override public void visit(SchemaIndexCacheVisitorClosure clo) {
                        idxBuildStatusStorage.onStartBuildNewIndex(cacheCtx);

                        try {
                            super.visit(clo);

                            buildIdxFut.get();
                        }
                        catch (Exception e) {
                            throw new IgniteException(e);
                        }
                        finally {
                            idxBuildStatusStorage.onFinishBuildNewIndex(cacheName);
                        }
                    }
                };
            }
            else
                // For not started caches we shouldn't add any data to index.
                visitor = clo -> {
                };

            idx.dynamicIndexCreate(op0.schemaName(), op0.tableName(), idxDesc, op0.ifNotExists(), visitor);
        }
        else if (op instanceof SchemaIndexDropOperation) {
            SchemaIndexDropOperation op0 = (SchemaIndexDropOperation) op;

            idx.dynamicIndexDrop(op0.schemaName(), op0.indexName(), op0.ifExists());
        }
        else if (op instanceof SchemaAlterTableAddColumnOperation) {
            SchemaAlterTableAddColumnOperation op0 = (SchemaAlterTableAddColumnOperation) op;

            processDynamicAddColumn(type, op0.columns());

            idx.dynamicAddColumn(op0.schemaName(), op0.tableName(), op0.columns(), op0.ifTableExists(), op0.ifNotExists());
        }
        else if (op instanceof SchemaAlterTableDropColumnOperation) {
            SchemaAlterTableDropColumnOperation op0 = (SchemaAlterTableDropColumnOperation) op;

            processDynamicDropColumn(type, op0.columns());

            idx.dynamicDropColumn(op0.schemaName(), op0.tableName(), op0.columns(), op0.ifTableExists(), op0.ifExists());
        }
        else if (op instanceof SchemaAddQueryEntityOperation) {
            SchemaAddQueryEntityOperation op0 = (SchemaAddQueryEntityOperation) op;

            if (!cacheNames.contains(op0.cacheName())) {
                cacheInfo.onSchemaAddQueryEntity(op0);

                T3<Collection<QueryTypeCandidate>, Map<String, QueryTypeDescriptorImpl>, Map<String, QueryTypeDescriptorImpl>> candRes =
                    createQueryCandidates(op0.cacheName(), op0.schemaName(), cacheInfo, op0.entities(), op0.isSqlEscape());

                registerCache0(op0.cacheName(), op.schemaName(), cacheInfo, candRes.get1(), false);
            }

            if (idxRebuildFutStorage.prepareRebuildIndexes(singleton(cacheInfo.cacheId()), null).isEmpty())
                rebuildIndexesFromHash0(cacheInfo.cacheContext(), false, cancelTok);
            else {
                if (log.isInfoEnabled())
                    log.info("Rebuilding indexes for the cache is already in progress: " + cacheInfo.name());
            }
        }
        else
            throw new SchemaOperationException("Unsupported operation: " + op);
    }
    catch (Throwable e) {
        if (e instanceof SchemaOperationException)
            throw (SchemaOperationException) e;
        else
            throw new SchemaOperationException("Schema change operation failed: " + e.getMessage(), e);
    }
}
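When the requested parallelism exceeds the build-index pool size, the code above only logs a warning; per that warning, the build still runs on at most buildIdxPoolSize threads. A small sketch of that effective cap, with a hypothetical helper name (effectiveIndexBuildParallelism); the actual clamping happens inside the indexing module, not in this method:

    // Sketch: the effective number of threads used to build a new index.
    private static int effectiveIndexBuildParallelism(int requestedParallelism, int buildIdxPoolSize) {
        // Requests above the pool size only trigger the warning shown above;
        // the build uses at most buildIdxPoolSize threads.
        return Math.min(requestedParallelism, buildIdxPoolSize);
    }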
Use of org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexDropOperation in the Apache Ignite project: class GridQueryProcessor, method prepareChangeOnStartedCache.
/**
* Prepare change on started cache.
*
* @param op Operation.
* @return Result: affected type, nop flag, error.
*/
private T3<QueryTypeDescriptorImpl, Boolean, SchemaOperationException> prepareChangeOnStartedCache(
    SchemaAbstractOperation op) {
    QueryTypeDescriptorImpl type = null;
    boolean nop = false;
    SchemaOperationException err = null;

    String cacheName = op.cacheName();

    if (op instanceof SchemaIndexCreateOperation) {
        SchemaIndexCreateOperation op0 = (SchemaIndexCreateOperation) op;

        QueryIndex idx = op0.index();

        // Make sure table exists.
        String tblName = op0.tableName();

        type = type(cacheName, tblName);

        if (type == null)
            err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, tblName);
        else {
            // Make sure that index can be applied to the given table.
            for (String idxField : idx.getFieldNames()) {
                if (!type.fields().containsKey(idxField)) {
                    err = new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, idxField);

                    break;
                }
            }
        }

        // Check conflict with other indexes.
        if (err == null) {
            String idxName = op0.index().getName();

            QueryIndexKey idxKey = new QueryIndexKey(op.schemaName(), idxName);

            if (idxs.get(idxKey) != null) {
                if (op0.ifNotExists())
                    nop = true;
                else
                    err = new SchemaOperationException(SchemaOperationException.CODE_INDEX_EXISTS, idxName);
            }
        }
    }
    else if (op instanceof SchemaIndexDropOperation) {
        SchemaIndexDropOperation op0 = (SchemaIndexDropOperation) op;

        String idxName = op0.indexName();

        QueryIndexDescriptorImpl oldIdx = idxs.get(new QueryIndexKey(op.schemaName(), idxName));

        if (oldIdx == null) {
            if (op0.ifExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, idxName);
        }
        else
            type = oldIdx.typeDescriptor();
    }
    else if (op instanceof SchemaAlterTableAddColumnOperation) {
        SchemaAlterTableAddColumnOperation op0 = (SchemaAlterTableAddColumnOperation) op;

        type = type(cacheName, op0.tableName());

        if (type == null) {
            if (op0.ifTableExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, op0.tableName());
        }
        else {
            for (QueryField col : op0.columns()) {
                if (type.hasField(col.name())) {
                    if (op0.ifNotExists()) {
                        assert op0.columns().size() == 1;

                        nop = true;
                    }
                    else
                        err = new SchemaOperationException(CODE_COLUMN_EXISTS, col.name());
                }
                else if (!checkFieldOnBinaryType(type.typeId(), col))
                    err = new SchemaOperationException(CODE_COLUMN_EXISTS, "with a different type.");
            }
        }
    }
    else if (op instanceof SchemaAlterTableDropColumnOperation) {
        SchemaAlterTableDropColumnOperation op0 = (SchemaAlterTableDropColumnOperation) op;

        type = type(cacheName, op0.tableName());

        if (type == null) {
            if (op0.ifTableExists())
                nop = true;
            else
                err = new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, op0.tableName());
        }
        else {
            for (String name : op0.columns()) {
                if (err != null)
                    break;

                if (!type.hasField(name)) {
                    if (op0.ifExists()) {
                        assert op0.columns().size() == 1;

                        nop = true;
                    }
                    else
                        err = new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, name);

                    break;
                }

                err = QueryUtils.validateDropColumn(type, name);
            }
        }
    }
    else if (op instanceof SchemaAddQueryEntityOperation) {
        if (cacheNames.contains(op.cacheName()))
            err = new SchemaOperationException(SchemaOperationException.CODE_CACHE_ALREADY_INDEXED, op.cacheName());
    }
    else
        err = new SchemaOperationException("Unsupported operation: " + op);

    return new T3<>(type, nop, err);
}
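The DROP INDEX branch above illustrates the ifExists() contract: a missing index is either a silent no-op or a CODE_INDEX_NOT_FOUND error. A hedged sketch of that decision in isolation, using only the accessors shown above; the helper name classifyDropIndex is illustrative only:

    // Sketch: classify a drop-index request against the locally registered index (may be null).
    private static T2<Boolean, SchemaOperationException> classifyDropIndex(SchemaIndexDropOperation op0,
        QueryIndexDescriptorImpl oldIdx) {
        if (oldIdx != null)
            return new T2<>(false, null); // Index exists: the drop proceeds on its type descriptor.

        if (op0.ifExists())
            return new T2<>(true, null); // DROP INDEX IF EXISTS on a missing index: no-op.

        return new T2<>(false,
            new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, op0.indexName()));
    }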