use of org.apache.derby.catalog.UUID in project derby by apache.
the class DataDictionaryImpl method createSystemProcedureOrFunction.
/**
* Generic create procedure routine.
* <p>
* Takes the input procedure and inserts it into the appropriate
* catalog.
*
* The leading arguments are "IN" parameters; if num_out_param is
* greater than zero, the trailing arguments are registered as "OUT"
* parameters.
*
* @param routine_name name of the routine in java and the SQL
* procedure name.
*
* @param schema_uuid UUID of the schema that will own the routine.
*
* @param arg_names String array of procedure argument names in order.
*
* @param arg_types Internal SQL types of the arguments.
*
* @param num_out_param number of trailing OUT parameters.
*
* @param num_result_sets number of dynamic result sets returned by the routine.
*
* @param routine_sql_control
* One of the RoutineAliasInfo constants:
* MODIFIES_SQL_DATA
* READS_SQL_DATA
* CONTAINS_SQL
* NO_SQL
*
* @param isDeterministic True if the procedure/function is DETERMINISTIC
*
* @param hasVarargs True if the procedure/function has a trailing VARARGS argument
*
* @param return_type null for a procedure. For functions, the return type
* of the function.
*
* @param newlyCreatedRoutines evolving set of routines, some of which may need permissions later on
*
* @param tc an instance of the TransactionController
*
* @param procClass the fully qualified name of the class that contains
* the java definitions for the stored procedures
*
* @return UUID of the system routine that got created.
*
* @exception StandardException Standard exception policy.
*/
private final UUID createSystemProcedureOrFunction(
        String routine_name,
        UUID schema_uuid,
        String[] arg_names,
        TypeDescriptor[] arg_types,
        int num_out_param,
        int num_result_sets,
        short routine_sql_control,
        boolean isDeterministic,
        boolean hasVarargs,
        TypeDescriptor return_type,
        HashSet<String> newlyCreatedRoutines,
        TransactionController tc,
        String procClass) throws StandardException {

    int num_args = 0;
    if (arg_names != null)
        num_args = arg_names.length;

    if (SanityManager.DEBUG) {
        if (num_args != 0) {
            SanityManager.ASSERT(arg_names != null);
            SanityManager.ASSERT(arg_types != null);
            SanityManager.ASSERT(arg_names.length == arg_types.length);
        }
    }

    // Leading arguments are IN; any trailing num_out_param arguments are OUT.
    int[] arg_modes = null;
    if (num_args != 0) {
        arg_modes = new int[num_args];
        int num_in_param = num_args - num_out_param;
        for (int i = 0; i < num_in_param; i++)
            arg_modes[i] = ParameterMetaData.parameterModeIn;
        for (int i = 0; i < num_out_param; i++)
            arg_modes[num_in_param + i] = ParameterMetaData.parameterModeOut;
    }

    RoutineAliasInfo routine_alias_info = new RoutineAliasInfo(
            routine_name,              // name of routine
            num_args,                  // number of params
            arg_names,                 // names of params
            arg_types,                 // types of params
            arg_modes,                 // parameter modes (IN/OUT)
            num_result_sets,           // number of result sets
            RoutineAliasInfo.PS_JAVA,  // link to java routine
            routine_sql_control,       // one of the SQL-control constants above
            isDeterministic,           // whether the procedure/function is DETERMINISTIC
            hasVarargs,                // whether the procedure/function has VARARGS
            false,                     // not definer's rights
            true,                      // calledOnNullInput
            return_type);

    UUID routine_uuid = getUUIDFactory().createUUID();
    AliasDescriptor ads = new AliasDescriptor(
            this,
            routine_uuid,
            routine_name,
            schema_uuid,
            procClass,
            (return_type == null)
                ? AliasInfo.ALIAS_TYPE_PROCEDURE_AS_CHAR
                : AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR,
            (return_type == null)
                ? AliasInfo.ALIAS_NAME_SPACE_PROCEDURE_AS_CHAR
                : AliasInfo.ALIAS_NAME_SPACE_FUNCTION_AS_CHAR,
            false,
            routine_alias_info,
            null);

    addDescriptor(ads, null, DataDictionary.SYSALIASES_CATALOG_NUM, false, tc);
    newlyCreatedRoutines.add(routine_name);

    return routine_uuid;
}
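A minimal sketch of how a caller inside DataDictionaryImpl might use this helper to register a one-argument procedure in the SYSCS_UTIL schema. The routine name SYSCS_EXAMPLE_PROC and the procClass value are hypothetical placeholders (no such routine exists in Derby), and newlyCreatedRoutines and tc are assumed to be in scope, as they are in the create_* methods shown later on this page.
// Hypothetical registration of a one-argument procedure (sketch only).
UUID sysUtilUUID = getSystemUtilSchemaDescriptor().getUUID();
String[] arg_names = { "tableName" };
TypeDescriptor[] arg_types = { CATALOG_TYPE_SYSTEM_IDENTIFIER };
UUID routine_uuid = createSystemProcedureOrFunction(
        "SYSCS_EXAMPLE_PROC",               // hypothetical routine name
        sysUtilUUID,                        // schema that owns the routine
        arg_names,
        arg_types,
        0,                                  // no OUT parameters
        0,                                  // no dynamic result sets
        RoutineAliasInfo.MODIFIES_SQL_DATA,
        false,                              // not DETERMINISTIC
        false,                              // no VARARGS
        (TypeDescriptor) null,              // null return type => procedure
        newlyCreatedRoutines,
        tc,
        "org.apache.derby.catalog.SystemProcedures"); // assumed home of the Java method body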
use of org.apache.derby.catalog.UUID in project derby by apache.
the class DataDictionaryImpl method dropStoredDependency.
/**
* Drop a single dependency from the data dictionary.
*
* @param dd The DependencyDescriptor.
* @param tc TransactionController for the transaction
*
* @exception StandardException Thrown on failure
*/
public void dropStoredDependency(DependencyDescriptor dd, TransactionController tc) throws StandardException {
    ExecIndexRow keyRow1 = null;
    UUID dependentID = dd.getUUID();
    UUID providerID = dd.getProviderID();
    DataValueDescriptor dependentIDOrderable = getIDValueAsCHAR(dependentID);
    TabInfoImpl ti = getNonCoreTI(SYSDEPENDS_CATALOG_NUM);

    /* Use dependentIDOrderable in both start
     * and stop position for index 1 scan.
     */
    keyRow1 = (ExecIndexRow) exFactory.getIndexableRow(1);
    keyRow1.setColumn(1, dependentIDOrderable);

    // only drop the rows which have this providerID
    TupleFilter filter = new DropDependencyFilter(providerID);

    ti.deleteRows(tc,
            keyRow1, ScanController.GE,   // start row
            null,                         // qualifier
            filter,                       // filter on base row
            keyRow1, ScanController.GT,   // stop row
            SYSDEPENDSRowFactory.SYSDEPENDS_INDEX1_ID);
}
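A hedged usage sketch: dropping every stored dependency on a given provider might look like the loop below, where the deps list and providerID are assumed to come from surrounding DataDictionaryImpl code (for example, a dependency-list lookup for the dependent object).
// Sketch: drop each stored dependency whose provider matches providerID.
// dropStoredDependency already filters by provider via DropDependencyFilter,
// so the extra check here only avoids needless calls.
for (DependencyDescriptor dep : deps) {
    if (providerID.equals(dep.getProviderID())) {
        dropStoredDependency(dep, tc);
    }
}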
use of org.apache.derby.catalog.UUID in project derby by apache.
the class DataDictionaryImpl method getSPSDescriptor.
/**
* Get a SPSDescriptor given its name.
* The name cache is only consulted in compile-only mode; with caching
* we need to be very careful about invalidation. When the cache is not
* used, invalidations block on existing SPSD instances (since they
* have already been read in).
*
* @param stmtName the statement name
* @param sd The SchemaDescriptor
*
* @return The SPSDescriptor for the statement.
*
* @exception StandardException Thrown on failure
*/
public SPSDescriptor getSPSDescriptor(String stmtName, SchemaDescriptor sd) throws StandardException {
    SPSDescriptor sps = null;
    TableKey stmtKey;
    UUID schemaUUID;

    /*
    ** If we didn't get a schema descriptor, we had better
    ** have a system table.
    */
    if (SanityManager.DEBUG) {
        if (sd == null) {
            SanityManager.THROWASSERT("null schema for statement " + stmtName);
        }
    }

    schemaUUID = sd.getUUID();

    /* Only use the cache if we're in compile-only mode */
    if ((spsNameCache != null) && (getCacheMode() == DataDictionary.COMPILE_ONLY_MODE)) {
        stmtKey = new TableKey(schemaUUID, stmtName);
        SPSNameCacheable cacheEntry = (SPSNameCacheable) spsNameCache.find(stmtKey);
        if (cacheEntry != null) {
            sps = cacheEntry.getSPSDescriptor();
            spsNameCache.release(cacheEntry);
        }
        // System.out.println("stmt text " + sps.getText());
        return sps;
    }

    return getSPSDescriptorIndex1Scan(stmtName, schemaUUID.toString());
}
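An illustrative lookup, assuming getSystemSchemaDescriptor() returns the SYS schema descriptor as elsewhere in the data dictionary; the statement name METADATA_EXAMPLE is a made-up placeholder.
// Sketch only: resolve a stored prepared statement by name and take its UUID.
SchemaDescriptor sysSchema = getSystemSchemaDescriptor();
SPSDescriptor spsd = getSPSDescriptor("METADATA_EXAMPLE", sysSchema);
if (spsd != null) {
    UUID spsId = spsd.getUUID();   // UUID of the stored prepared statement
    // ... use spsId for dependency tracking or invalidation
}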
use of org.apache.derby.catalog.UUID in project derby by apache.
the class DataDictionaryImpl method boot.
/**
* Start-up method for this instance of the data dictionary.
*
* @param create true if the data dictionary is being created for a new database
* @param startParams The start-up parameters
*
* @exception StandardException Thrown if the module fails to start
*/
public void boot(boolean create, Properties startParams) throws StandardException {
softwareVersion = new DD_Version(this, DataDictionary.DD_VERSION_DERBY_10_15);
startupParameters = startParams;
uuidFactory = getMonitor().getUUIDFactory();
engineType = Monitor.getEngineType(startParams);
// Set the collation type of system schemas before we start loading
// the built-in schemas' SchemaDescriptor(s). This is because each
// SchemaDescriptor asks the DataDictionary for its correct collation
// type. We can't load the SD for the SESSION schema just yet because
// we do not know the collation type for user schemas yet; we will
// know the right collation for user schemas a little later in this
// boot method.
collationTypeOfSystemSchemas = StringDataValue.COLLATION_TYPE_UCS_BASIC;
getBuiltinSystemSchemas();
// REMIND: actually, we're supposed to get the DataValueFactory
// out of the connection context...this is a bit of a shortcut.
// We get the DataValueFactory early in order to help bootstrap the system catalogs.
LanguageConnectionFactory langConnFactory = (LanguageConnectionFactory) bootServiceModule(create, this, LanguageConnectionFactory.MODULE, startParams);
dvf = langConnFactory.getDataValueFactory();
exFactory = (ExecutionFactory) bootServiceModule(create, this, ExecutionFactory.MODULE, startParams);
// initialize the arrays of core and non-core tables
initializeCatalogInfo();
// indicate that we are in the process of booting
booting = true;
// set only if a child class hasn't overridden this already
if (dataDescriptorGenerator == null) {
dataDescriptorGenerator = new DataDescriptorGenerator(this);
}
if (!create) {
// SYSTABLES
coreInfo[SYSTABLES_CORE_NUM].setHeapConglomerate(getBootParameter(startParams, CFG_SYSTABLES_ID, true));
coreInfo[SYSTABLES_CORE_NUM].setIndexConglomerate(SYSTABLESRowFactory.SYSTABLES_INDEX1_ID, getBootParameter(startParams, CFG_SYSTABLES_INDEX1_ID, true));
coreInfo[SYSTABLES_CORE_NUM].setIndexConglomerate(SYSTABLESRowFactory.SYSTABLES_INDEX2_ID, getBootParameter(startParams, CFG_SYSTABLES_INDEX2_ID, true));
// SYSCOLUMNS
coreInfo[SYSCOLUMNS_CORE_NUM].setHeapConglomerate(getBootParameter(startParams, CFG_SYSCOLUMNS_ID, true));
coreInfo[SYSCOLUMNS_CORE_NUM].setIndexConglomerate(SYSCOLUMNSRowFactory.SYSCOLUMNS_INDEX1_ID, getBootParameter(startParams, CFG_SYSCOLUMNS_INDEX1_ID, true));
// 2nd syscolumns index added in Xena, hence may not be there
coreInfo[SYSCOLUMNS_CORE_NUM].setIndexConglomerate(SYSCOLUMNSRowFactory.SYSCOLUMNS_INDEX2_ID, getBootParameter(startParams, CFG_SYSCOLUMNS_INDEX2_ID, false));
// SYSCONGLOMERATES
coreInfo[SYSCONGLOMERATES_CORE_NUM].setHeapConglomerate(getBootParameter(startParams, CFG_SYSCONGLOMERATES_ID, true));
coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX1_ID, getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX1_ID, true));
coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX2_ID, getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX2_ID, true));
coreInfo[SYSCONGLOMERATES_CORE_NUM].setIndexConglomerate(SYSCONGLOMERATESRowFactory.SYSCONGLOMERATES_INDEX3_ID, getBootParameter(startParams, CFG_SYSCONGLOMERATES_INDEX3_ID, true));
// SYSSCHEMAS
coreInfo[SYSSCHEMAS_CORE_NUM].setHeapConglomerate(getBootParameter(startParams, CFG_SYSSCHEMAS_ID, true));
coreInfo[SYSSCHEMAS_CORE_NUM].setIndexConglomerate(SYSSCHEMASRowFactory.SYSSCHEMAS_INDEX1_ID, getBootParameter(startParams, CFG_SYSSCHEMAS_INDEX1_ID, true));
coreInfo[SYSSCHEMAS_CORE_NUM].setIndexConglomerate(SYSSCHEMASRowFactory.SYSSCHEMAS_INDEX2_ID, getBootParameter(startParams, CFG_SYSSCHEMAS_INDEX2_ID, true));
}
String value = startParams.getProperty(Property.LANG_TD_CACHE_SIZE);
tdCacheSize = PropertyUtil.intPropertyValue(Property.LANG_TD_CACHE_SIZE, value, 0, Integer.MAX_VALUE, Property.LANG_TD_CACHE_SIZE_DEFAULT);
value = startParams.getProperty(Property.LANG_SPS_CACHE_SIZE);
stmtCacheSize = PropertyUtil.intPropertyValue(Property.LANG_SPS_CACHE_SIZE, value, 0, Integer.MAX_VALUE, Property.LANG_SPS_CACHE_SIZE_DEFAULT);
value = startParams.getProperty(Property.LANG_SEQGEN_CACHE_SIZE);
seqgenCacheSize = PropertyUtil.intPropertyValue(Property.LANG_SEQGEN_CACHE_SIZE, value, 0, Integer.MAX_VALUE, Property.LANG_SEQGEN_CACHE_SIZE_DEFAULT);
value = startParams.getProperty(Property.LANG_PERMISSIONS_CACHE_SIZE);
permissionsCacheSize = PropertyUtil.intPropertyValue(Property.LANG_PERMISSIONS_CACHE_SIZE, value, 0, Integer.MAX_VALUE, Property.LANG_PERMISSIONS_CACHE_SIZE_DEFAULT);
// See if automatic index statistics update is disabled through a
// system wide property. May be overridden by a database specific
// property later on.
// The default is that automatic index statistics update is enabled.
indexStatsUpdateDisabled = !PropertyUtil.getSystemBoolean(Property.STORAGE_AUTO_INDEX_STATS, true);
// See if we should enable logging of index stats activities.
indexStatsUpdateLogging = PropertyUtil.getSystemBoolean(Property.STORAGE_AUTO_INDEX_STATS_LOGGING);
// See if we should enable tracing of index stats activities.
indexStatsUpdateTracing = PropertyUtil.getSystemProperty(Property.STORAGE_AUTO_INDEX_STATS_TRACING, "off");
/*
* data dictionary contexts are only associated with connections.
* we have to look for the basic data dictionary, as there is
* no connection, and thus no context stack yet.
*/
/*
* Get the table descriptor cache.
*/
CacheFactory cf = (CacheFactory) startSystemModule(org.apache.derby.shared.common.reference.Module.CacheFactory);
OIDTdCache = cf.newCacheManager(this, "TableDescriptorOIDCache", tdCacheSize, tdCacheSize);
nameTdCache = cf.newCacheManager(this, "TableDescriptorNameCache", tdCacheSize, tdCacheSize);
if (stmtCacheSize > 0) {
spsNameCache = cf.newCacheManager(this, "SPSNameDescriptorCache", stmtCacheSize, stmtCacheSize);
spsIdHash = new Hashtable<UUID, SPSDescriptor>(stmtCacheSize);
// spsTextHash = new Hashtable(stmtCacheSize);
}
sequenceGeneratorCache = cf.newCacheManager(this, "SequenceGeneratorCache", seqgenCacheSize, seqgenCacheSize);
sequenceIDs = new HashMap<String, HashMap<String, String>>();
/* Get the object to coordinate cache transitions */
cacheCoordinator = new ShExLockable();
/* Get the AccessFactory so we can do transaction work */
af = (AccessFactory) findServiceModule(this, AccessFactory.MODULE);
/* Get the lock factory */
lockFactory = af.getLockFactory();
/*
* Now we need to set up a context stack for the database creation work.
* We assume the system boot process has already created a context
* manager, but not that the contexts we need are on it.
*/
ContextService csf = getContextService();
ContextManager cm = csf.getCurrentContextManager();
if (SanityManager.DEBUG)
SanityManager.ASSERT((cm != null), "Failed to get current ContextManager");
// RESOLVE other non-StandardException errors.
bootingTC = null;
try {
// Get a transaction controller. This has the side effect of
// creating a transaction context if there isn't one already.
bootingTC = af.getTransaction(cm);
/*
We need an execution context so that we can generate rows
REMIND: maybe only for create case?
*/
exFactory.newExecutionContext(cm);
DataDescriptorGenerator ddg = getDataDescriptorGenerator();
// We should set the user schema collation type here now because
// later on, we are going to create user schema APP. By the time any
// user schema gets created, we should have the correct collation
// type set for such schemas to use. For this reason, don't remove
// the following if else statement and don't move it later in this
// method.
String userDefinedCollation;
if (create) {
// Get the collation attribute from the JDBC url. It can only
// have one of 2 possible values - UCS_BASIC or TERRITORY_BASED
// This attribute can only be specified at database create time.
// The attribute value has already been verified in DVF.boot and
// hence we can be assured that the attribute value if provided
// is either UCS_BASIC or TERRITORY_BASED. If none provided,
// then we will take it to be the default which is UCS_BASIC.
userDefinedCollation = startParams.getProperty(Attribute.COLLATION, Property.UCS_BASIC_COLLATION);
bootingTC.setProperty(Property.COLLATION, userDefinedCollation, true);
} else {
userDefinedCollation = startParams.getProperty(Property.COLLATION, Property.UCS_BASIC_COLLATION);
}
// Initialize the collation type of user schemas by looking at
// collation property/attribute.
collationTypeOfUserSchemas = DataTypeDescriptor.getCollationType(userDefinedCollation);
if (SanityManager.DEBUG)
SanityManager.ASSERT((collationTypeOfUserSchemas != -1), "Invalid collation type: " + userDefinedCollation);
// Now is also a good time to create schema descriptor for global
// temporary tables. Since this is a user schema, it should use the
// collation type associated with user schemas. Since we just
// finished setting up the collation type of user schema, it is
// safe to create user SchemaDescriptor(s) now.
declaredGlobalTemporaryTablesSchemaDesc = newDeclaredGlobalTemporaryTablesSchemaDesc(SchemaDescriptor.STD_DECLARED_GLOBAL_TEMPORARY_TABLES_SCHEMA_NAME);
boolean nativeAuthenticationEnabled = PropertyUtil.nativeAuthenticationEnabled(startParams);
if (create) {
String userName = IdUtil.getUserNameFromURLProps(startParams);
authorizationDatabaseOwner = IdUtil.getUserAuthorizationId(userName);
HashSet<String> newlyCreatedRoutines = new HashSet<String>();
// Record the current dictionary version. This must happen before
// createDictionaryTables because SYSCOLUMNSRowFactory queries the
// version info; see DERBY-6904.
dictionaryVersion = softwareVersion;
// create any required tables.
createDictionaryTables(startParams, bootingTC, ddg);
// create procedures for network server metadata
create_SYSIBM_procedures(bootingTC, newlyCreatedRoutines);
// create metadata sps statement required for network server
createSystemSps(bootingTC);
// create the SYSCS_UTIL system procedures
create_SYSCS_procedures(bootingTC, newlyCreatedRoutines);
// now grant execute permission on some of these routines
grantPublicAccessToSystemRoutines(newlyCreatedRoutines, bootingTC, authorizationDatabaseOwner);
/* Set properties for current and create time
* DataDictionary versions.
*/
bootingTC.setProperty(DataDictionary.CORE_DATA_DICTIONARY_VERSION, dictionaryVersion, true);
bootingTC.setProperty(DataDictionary.CREATE_DATA_DICTIONARY_VERSION, dictionaryVersion, true);
// If SQL authorization is enabled system-wide at create time,
// persist it as a database property.
if (PropertyUtil.getSystemBoolean(Property.SQL_AUTHORIZATION_PROPERTY)) {
bootingTC.setProperty(Property.SQL_AUTHORIZATION_PROPERTY, "true", true);
}
if (PropertyUtil.getSystemBoolean(Property.SQL_AUTHORIZATION_PROPERTY) || nativeAuthenticationEnabled) {
usesSqlAuthorization = true;
}
// Set default hash algorithm used to protect passwords stored
// in the database for BUILTIN and NATIVE authentication.
bootingTC.setProperty(Property.AUTHENTICATION_BUILTIN_ALGORITHM, findDefaultBuiltinAlgorithm(), false);
} else {
// Get the ids for non-core tables
loadDictionaryTables(bootingTC, startParams);
// See if index stats update is disabled by a database prop.
String dbIndexStatsUpdateAuto = PropertyUtil.getDatabaseProperty(bootingTC, Property.STORAGE_AUTO_INDEX_STATS);
if (dbIndexStatsUpdateAuto != null) {
indexStatsUpdateDisabled = !Boolean.valueOf(dbIndexStatsUpdateAuto).booleanValue();
}
String dbEnableIndexStatsLogging = PropertyUtil.getDatabaseProperty(bootingTC, Property.STORAGE_AUTO_INDEX_STATS_LOGGING);
if (dbEnableIndexStatsLogging != null) {
indexStatsUpdateLogging = Boolean.valueOf(dbEnableIndexStatsLogging).booleanValue();
}
String dbEnableIndexStatsTracing = PropertyUtil.getDatabaseProperty(bootingTC, Property.STORAGE_AUTO_INDEX_STATS_TRACING);
if (dbEnableIndexStatsTracing != null) {
if (!(dbEnableIndexStatsTracing.equalsIgnoreCase("off") || dbEnableIndexStatsTracing.equalsIgnoreCase("log") || dbEnableIndexStatsTracing.equalsIgnoreCase("stdout") || dbEnableIndexStatsTracing.equalsIgnoreCase("both"))) {
indexStatsUpdateTracing = "off";
} else {
indexStatsUpdateTracing = dbEnableIndexStatsTracing;
}
}
String sqlAuth = PropertyUtil.getDatabaseProperty(bootingTC, Property.SQL_AUTHORIZATION_PROPERTY);
// Feature compatibility check.
if (Boolean.valueOf(startParams.getProperty(Attribute.SOFT_UPGRADE_NO_FEATURE_CHECK)).booleanValue()) {
// Feature checks are skipped during this soft-upgrade boot; the full
// check (including the database owner check) happens at hard upgrade.
if (dictionaryVersion.majorVersionNumber >= DataDictionary.DD_VERSION_DERBY_10_2) {
usesSqlAuthorization = Boolean.valueOf(sqlAuth).booleanValue() || nativeAuthenticationEnabled;
}
} else {
if (Boolean.valueOf(sqlAuth).booleanValue() || nativeAuthenticationEnabled) {
// SQL authorization requires 10.2 or higher database
checkVersion(DataDictionary.DD_VERSION_DERBY_10_2, "sqlAuthorization");
usesSqlAuthorization = true;
}
}
}
if (SanityManager.DEBUG)
SanityManager.ASSERT((authorizationDatabaseOwner != null), "Failed to get Database Owner authorization");
/* Commit & destroy the create database */
bootingTC.commit();
// done with ctx
cm.getContext(ExecutionContext.CONTEXT_ID).popMe();
} finally {
if (bootingTC != null) {
// gets rid of the transaction context
bootingTC.destroy();
bootingTC = null;
}
}
setDependencyManager();
booting = false;
}
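For orientation, a hedged sketch of how the start-up properties consulted above could be assembled for a create-time boot. The keys are the Property and Attribute constants read in boot(); the values are purely illustrative.
// Illustrative start-up properties for a create-time boot (example values).
Properties startParams = new Properties();
startParams.setProperty(Property.LANG_TD_CACHE_SIZE, "128");      // table descriptor cache size
startParams.setProperty(Property.LANG_SPS_CACHE_SIZE, "64");      // stored prepared statement cache size
startParams.setProperty(Attribute.COLLATION, "TERRITORY_BASED");  // create-time only; default is UCS_BASIC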
use of org.apache.derby.catalog.UUID in project derby by apache.
the class DataDictionaryImpl method create_10_13_system_procedures.
/**
* <p>
* Create system procedures that are part of the SYSCS_UTIL schema, added in version 10.13.
* </p>
*
* @param tc an instance of the Transaction Controller.
* @param newlyCreatedRoutines set of routines we are creating (used to add permissions later on)
*/
void create_10_13_system_procedures(TransactionController tc, HashSet<String> newlyCreatedRoutines) throws StandardException {
UUID sysUtilUUID = getSystemUtilSchemaDescriptor().getUUID();
TypeDescriptor varchar32672Type = DataTypeDescriptor.getCatalogType(Types.VARCHAR, 32672);
/* SYSCS_IMPORT_TABLE_BULK(IN SCHEMANAME VARCHAR(128),
* IN TABLENAME VARCHAR(128), IN FILENAME VARCHAR(32672),
* IN COLUMNDELIMITER CHAR(1), IN CHARACTERDELIMITER CHAR(1),
* IN CODESET VARCHAR(128), IN REPLACE SMALLINT,
* IN SKIP SMALLINT)
*/
{
// procedure argument names
String[] arg_names = { "schemaName", "tableName", "fileName", " columnDelimiter", "characterDelimiter", "codeset", "replace", "skip" };
// procedure argument types
TypeDescriptor[] arg_types = { CATALOG_TYPE_SYSTEM_IDENTIFIER, CATALOG_TYPE_SYSTEM_IDENTIFIER, varchar32672Type, DataTypeDescriptor.getCatalogType(Types.CHAR, 1), DataTypeDescriptor.getCatalogType(Types.CHAR, 1), CATALOG_TYPE_SYSTEM_IDENTIFIER, TypeDescriptor.SMALLINT, TypeDescriptor.SMALLINT };
createSystemProcedureOrFunction("SYSCS_IMPORT_TABLE_BULK", sysUtilUUID, arg_names, arg_types, 0, 0, RoutineAliasInfo.MODIFIES_SQL_DATA, false, false, (TypeDescriptor) null, newlyCreatedRoutines, tc);
}
/* SYSCS_IMPORT_DATA_BULK(IN SCHEMANAME VARCHAR(128),
* IN TABLENAME VARCHAR(128), IN INSERTCOLUMNLIST VARCHAR(32672),
* IN COLUMNINDEXES VARCHAR(32672), IN FILENAME VARCHAR(32672),
* IN COLUMNDELIMITER CHAR(1), IN CHARACTERDELIMITER CHAR(1),
* IN CODESET VARCHAR(128), IN REPLACE SMALLINT,
* IN SKIP SMALLINT)
*/
{
// procedure argument names
String[] arg_names = { "schemaName", "tableName", "insertColumnList", "columnIndexes", "fileName", " columnDelimiter", "characterDelimiter", "codeset", "replace", "skip" };
// procedure argument types
TypeDescriptor[] arg_types = { CATALOG_TYPE_SYSTEM_IDENTIFIER, CATALOG_TYPE_SYSTEM_IDENTIFIER, varchar32672Type, varchar32672Type, varchar32672Type, DataTypeDescriptor.getCatalogType(Types.CHAR, 1), DataTypeDescriptor.getCatalogType(Types.CHAR, 1), CATALOG_TYPE_SYSTEM_IDENTIFIER, TypeDescriptor.SMALLINT, TypeDescriptor.SMALLINT };
createSystemProcedureOrFunction("SYSCS_IMPORT_DATA_BULK", sysUtilUUID, arg_names, arg_types, 0, 0, RoutineAliasInfo.MODIFIES_SQL_DATA, false, false, (TypeDescriptor) null, newlyCreatedRoutines, tc);
}
}
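Once registered, the bulk import procedure above can be called through JDBC. A hedged sketch follows (the connection URL, table name, and file name are made up, and the exact meaning of the trailing SMALLINT arguments should be checked against the SYSCS_UTIL documentation); it assumes java.sql imports and the embedded driver on the classpath.
try (Connection conn = DriverManager.getConnection("jdbc:derby:sampleDB");
     CallableStatement cs = conn.prepareCall(
             "CALL SYSCS_UTIL.SYSCS_IMPORT_TABLE_BULK(?, ?, ?, ?, ?, ?, ?, ?)")) {
    cs.setString(1, "APP");        // schemaName
    cs.setString(2, "ORDERS");     // tableName
    cs.setString(3, "orders.dat"); // fileName
    cs.setString(4, ",");          // columnDelimiter
    cs.setString(5, "\"");         // characterDelimiter
    cs.setString(6, "UTF-8");      // codeset
    cs.setShort(7, (short) 0);     // replace
    cs.setShort(8, (short) 0);     // skip
    cs.execute();
}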