Use of org.apache.ignite.internal.processors.query.schema.SchemaOperationWorker in project ignite by apache.
The class GridQueryProcessor, method onCacheStart0.
/**
 * Create type descriptors from schema and initialize indexing for given cache.<p>
 * Use with {@link #busyLock} where appropriate.
 * @param cctx Cache context.
 * @param schema Initial schema.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({ "deprecation", "ThrowableResultOfMethodCallIgnored" })
public void onCacheStart0(GridCacheContext<?, ?> cctx, QuerySchema schema) throws IgniteCheckedException {
    cctx.shared().database().checkpointReadLock();
    try {
        synchronized (stateMux) {
            boolean escape = cctx.config().isSqlEscapeAll();
            String cacheName = cctx.name();
            String schemaName = QueryUtils.normalizeSchemaName(cacheName, cctx.config().getSqlSchema());
            // Prepare candidates.
            List<Class<?>> mustDeserializeClss = new ArrayList<>();
            Collection<QueryTypeCandidate> cands = new ArrayList<>();
            Collection<QueryEntity> qryEntities = schema.entities();
            if (!F.isEmpty(qryEntities)) {
                for (QueryEntity qryEntity : qryEntities) {
                    QueryTypeCandidate cand = QueryUtils.typeForQueryEntity(cacheName, cctx, qryEntity,
                        mustDeserializeClss, escape);
                    cands.add(cand);
                }
            }
            // Ensure that candidates have unique index names. Otherwise we will not be able to apply pending operations.
            Map<String, QueryTypeDescriptorImpl> tblTypMap = new HashMap<>();
            Map<String, QueryTypeDescriptorImpl> idxTypMap = new HashMap<>();
            for (QueryTypeCandidate cand : cands) {
                QueryTypeDescriptorImpl desc = cand.descriptor();
                QueryTypeDescriptorImpl oldDesc = tblTypMap.put(desc.tableName(), desc);
                if (oldDesc != null)
                    throw new IgniteException("Duplicate table name [cache=" + cacheName +
                        ", tblName=" + desc.tableName() + ", type1=" + desc.name() + ", type2=" + oldDesc.name() + ']');
                for (String idxName : desc.indexes().keySet()) {
                    oldDesc = idxTypMap.put(idxName, desc);
                    if (oldDesc != null)
                        throw new IgniteException("Duplicate index name [cache=" + cacheName +
                            ", idxName=" + idxName + ", type1=" + desc.name() + ", type2=" + oldDesc.name() + ']');
                }
            }
            // There could be only one in-flight operation for a cache.
            for (SchemaOperation op : schemaOps.values()) {
                if (F.eq(op.proposeMessage().deploymentId(), cctx.dynamicDeploymentId())) {
                    if (op.started()) {
                        SchemaOperationWorker worker = op.manager().worker();
                        assert !worker.cacheRegistered();
                        if (!worker.nop()) {
                            IgniteInternalFuture fut = worker.future();
                            assert fut.isDone();
                            if (fut.error() == null) {
                                SchemaAbstractOperation op0 = op.proposeMessage().operation();
                                if (op0 instanceof SchemaIndexCreateOperation) {
                                    SchemaIndexCreateOperation opCreate = (SchemaIndexCreateOperation) op0;
                                    QueryTypeDescriptorImpl typeDesc = tblTypMap.get(opCreate.tableName());
                                    assert typeDesc != null;
                                    QueryUtils.processDynamicIndexChange(opCreate.indexName(), opCreate.index(), typeDesc);
                                } else if (op0 instanceof SchemaIndexDropOperation) {
                                    SchemaIndexDropOperation opDrop = (SchemaIndexDropOperation) op0;
                                    QueryTypeDescriptorImpl typeDesc = idxTypMap.get(opDrop.indexName());
                                    assert typeDesc != null;
                                    QueryUtils.processDynamicIndexChange(opDrop.indexName(), null, typeDesc);
                                } else
                                    assert false;
                            }
                        }
                    }
                    break;
                }
            }
            // Ready to register at this point.
            registerCache0(cacheName, schemaName, cctx, cands);
            // Warn about possible implicit deserialization.
            if (!mustDeserializeClss.isEmpty()) {
                U.warn(log, "Some classes in query configuration cannot be written in binary format " +
                    "because they either implement Externalizable interface or have writeObject/readObject " +
                    "methods. Instances of these classes will be deserialized in order to build indexes. Please " +
                    "ensure that all nodes have these classes in classpath. To enable binary serialization " +
                    "either implement " + Binarylizable.class.getSimpleName() + " interface or set explicit " +
                    "serializer using BinaryTypeConfiguration.setSerializer() method: " + mustDeserializeClss);
            }
        }
    } finally {
        cctx.shared().database().checkpointReadUnlock();
    }
}
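For context, the sketch below shows the kind of initial schema this method consumes: query entities declared on the cache configuration become QueryTypeCandidate instances, and their indexes are registered when the cache starts. It is a minimal illustration using the public Ignite API; the cache name "personCache", the value type "Person" and its fields are hypothetical, not taken from the code above.

import java.util.Collections;
import java.util.LinkedHashMap;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.QueryEntity;
import org.apache.ignite.cache.QueryIndex;
import org.apache.ignite.configuration.CacheConfiguration;

public class InitialQuerySchemaSketch {
    public static void main(String[] args) {
        // Describe the SQL view of the cache: a "Person" table with two fields and one index.
        LinkedHashMap<String, String> fields = new LinkedHashMap<>();
        fields.put("id", Long.class.getName());
        fields.put("name", String.class.getName());

        QueryEntity person = new QueryEntity(Long.class.getName(), "Person")
            .setFields(fields)
            .setIndexes(Collections.singletonList(new QueryIndex("name")));

        CacheConfiguration<Long, Object> ccfg = new CacheConfiguration<Long, Object>()
            .setName("personCache")
            .setQueryEntities(Collections.singletonList(person));

        try (Ignite ignite = Ignition.start()) {
            // Starting the cache hands this schema to the query processor, which builds one
            // type descriptor per QueryEntity and registers the configured indexes.
            ignite.getOrCreateCache(ccfg);
        }
    }
}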
Use of org.apache.ignite.internal.processors.query.schema.SchemaOperationWorker in project ignite by apache.
The class GridQueryProcessor, method startSchemaChange.
/**
 * Initiate actual schema change operation.
 *
 * @param schemaOp Schema operation.
 */
@SuppressWarnings({ "unchecked", "ThrowableInstanceNeverThrown" })
private void startSchemaChange(SchemaOperation schemaOp) {
    assert Thread.holdsLock(stateMux);
    assert !schemaOp.started();
    // Get current cache state.
    SchemaProposeDiscoveryMessage msg = schemaOp.proposeMessage();
    String cacheName = msg.operation().cacheName();
    DynamicCacheDescriptor cacheDesc = ctx.cache().cacheDescriptor(cacheName);
    boolean cacheExists = cacheDesc != null && F.eq(msg.deploymentId(), cacheDesc.deploymentId());
    boolean cacheRegistered = cacheExists && cacheNames.contains(cacheName);
    // Validate schema state and decide whether we should proceed or not.
    SchemaAbstractOperation op = msg.operation();
    QueryTypeDescriptorImpl type = null;
    SchemaOperationException err;
    boolean nop = false;
    if (cacheExists) {
        if (cacheRegistered) {
            // If cache is started, we perform validation against real schema.
            T3<QueryTypeDescriptorImpl, Boolean, SchemaOperationException> res = prepareChangeOnStartedCache(op);
            assert res.get2() != null;
            type = res.get1();
            nop = res.get2();
            err = res.get3();
        } else {
            // If cache is not started yet, there is no schema. Take schema from cache descriptor and validate.
            QuerySchema schema = cacheDesc.schema();
            T2<Boolean, SchemaOperationException> res = prepareChangeOnNotStartedCache(op, schema);
            assert res.get1() != null;
            type = null;
            nop = res.get1();
            err = res.get2();
        }
    } else
        err = new SchemaOperationException(SchemaOperationException.CODE_CACHE_NOT_FOUND, cacheName);
    // Start operation.
    SchemaOperationWorker worker = new SchemaOperationWorker(ctx, this, msg.deploymentId(), op, nop, err,
        cacheRegistered, type);
    SchemaOperationManager mgr = new SchemaOperationManager(ctx, this, worker, ctx.clientNode() ? null : coordinator());
    schemaOp.manager(mgr);
    mgr.start();
    // Unwind pending IO messages.
    if (!ctx.clientNode() && coordinator().isLocal())
        unwindPendingMessages(schemaOp.id(), mgr);
    // Schedule operation finish handling if needed.
    if (schemaOp.hasFinishMessage())
        schemaOp.doFinish();
}
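For illustration, a minimal sketch of the user-facing action that typically reaches this method: a CREATE INDEX DDL statement issued through the SQL API is translated into an index-create schema operation, proposed to the cluster over discovery, and then driven by the SchemaOperationWorker and SchemaOperationManager created above. The sketch assumes the "personCache"/"Person" setup from the previous example; the index name is hypothetical.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class DynamicIndexSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Assumes the cache "personCache" with SQL table Person already exists (see previous sketch).
            IgniteCache<Long, Object> cache = ignite.cache("personCache");

            // The DDL is parsed into a schema-change operation, agreed on cluster-wide and
            // executed asynchronously by the schema-change machinery shown in startSchemaChange.
            cache.query(new SqlFieldsQuery("CREATE INDEX person_name_idx ON Person (name)")).getAll();
        }
    }
}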