use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From the class TestVoltDB, the method testCompileDeploymentAddUserToNonExistentGroup:
/**
 * ENG-7088: Validate that deployment file users that want to belong to roles which
 * don't yet exist don't render the deployment file invalid.
 */
@Test
public void testCompileDeploymentAddUserToNonExistentGroup() throws IOException {
    TPCCProjectBuilder project = new TPCCProjectBuilder();
    project.addDefaultSchema();
    project.addDefaultPartitioning();
    project.addDefaultProcedures();
    project.setSecurityEnabled(true, true);

    RoleInfo[] groups = new RoleInfo[] {
        new RoleInfo("foo", false, false, false, false, false, false),
        new RoleInfo("blah", false, false, false, false, false, false)
    };
    project.addRoles(groups);

    UserInfo[] users = new UserInfo[] {
        new UserInfo("john", "hugg", new String[] { "foo" }),
        new UserInfo("ryan", "betts", new String[] { "foo", "bar" }),
        new UserInfo("ariel", "weisberg", new String[] { "bar" })
    };
    project.addUsers(users);

    String testDir = BuildDirectoryUtils.getBuildDirectoryPath();
    String jarName = "compile-deployment.jar";
    String catalogJar = testDir + File.separator + jarName;
    assertTrue("Project failed to compile", project.compile(catalogJar));

    byte[] bytes = MiscUtils.fileToBytes(new File(catalogJar));
    String serializedCatalog = CatalogUtil.getSerializedCatalogStringFromJar(
            CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
    assertNotNull("Error loading catalog from jar", serializedCatalog);

    Catalog catalog = new Catalog();
    catalog.execute(serializedCatalog);
    // this should succeed even though group "bar" does not exist
    assertTrue("Deployment file should have been able to validate",
            CatalogUtil.compileDeployment(catalog, project.getPathToDeployment(), true) == null);
}
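The assertion above relies on CatalogUtil.compileDeployment() returning null when the deployment validates and an error string otherwise. A minimal sketch of that convention, pulled out of the test (the helper name validateDeployment is hypothetical, not part of VoltDB):

static boolean validateDeployment(Catalog catalog, String deploymentPath) {
    // compileDeployment() returns null on success and a human-readable error string on failure.
    String error = CatalogUtil.compileDeployment(catalog, deploymentPath, true);
    if (error != null) {
        System.err.println("Deployment rejected: " + error);
        return false;
    }
    return true;
}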
use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From the class CatalogContext, the method update:
public CatalogContext update(long txnId,
                             long uniqueId,
                             byte[] catalogBytes,
                             byte[] catalogBytesHash,
                             String diffCommands,
                             boolean incrementVersion,
                             byte[] deploymentBytes,
                             HostMessenger messenger,
                             boolean hasSchemaChange) {
    Catalog newCatalog = catalog.deepCopy();
    newCatalog.execute(diffCommands);
    int incValue = incrementVersion ? 1 : 0;
    // If there's no new catalog bytes, preserve the old one rather than
    // bashing it
    byte[] bytes = catalogBytes;
    if (bytes == null) {
        try {
            bytes = this.getCatalogJarBytes();
        } catch (IOException e) {
            // Failure is not an option
            hostLog.fatal(e.getMessage());
        }
    }
    // Ditto for the deploymentBytes
    byte[] depbytes = deploymentBytes;
    if (depbytes == null) {
        depbytes = this.deploymentBytes;
    }
    CatalogContext retval = new CatalogContext(
            txnId,
            uniqueId,
            newCatalog,
            this.m_dbSettings,
            bytes,
            catalogBytesHash,
            depbytes,
            catalogVersion + incValue,
            messenger,
            hasSchemaChange,
            m_defaultProcs,
            m_ptool);
    return retval;
}
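update() replays the diff commands against a deep copy of the current catalog, so the existing CatalogContext stays untouched for any readers that still hold it. A hedged usage sketch (the variable names are assumptions, not taken from VoltDB source):

// Passing null for catalogBytes or deploymentBytes keeps the previous bytes, as shown above.
CatalogContext newContext = oldContext.update(
        txnId,              // transaction id of the catalog change
        uniqueId,           // unique id of the catalog change
        newCatalogJarBytes, // null would preserve the old catalog jar bytes
        newCatalogHash,
        diffCommands,       // serialized catalog diff to replay on the deep copy
        true,               // increment the catalog version
        newDeploymentBytes, // null would preserve the old deployment bytes
        messenger,
        true);              // hasSchemaChange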
use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From the class RealVoltDB, the method readDeploymentAndCreateStarterCatalogContext:
boolean readDeploymentAndCreateStarterCatalogContext(VoltDB.Configuration config) {
    /*
     * Debate with the cluster what the deployment file should be
     */
    try {
        ZooKeeper zk = m_messenger.getZK();
        byte[] deploymentBytes = null;
        try {
            deploymentBytes = org.voltcore.utils.CoreUtils.urlToBytes(m_config.m_pathToDeployment);
        } catch (Exception ex) {
            // Fall back to getting the bytes from ZK.
        }
        DeploymentType deployment = null;
        try {
            if (deploymentBytes != null) {
                CatalogUtil.writeCatalogToZK(zk,
                        // Fill in innocuous values for non-deployment stuff
                        0, 0L, 0L,
                        // The spin loop in Inits.LoadCatalog.run() needs
                        // this to be of zero length until we have a real catalog.
                        new byte[] {},
                        null,
                        deploymentBytes);
                hostLog.info("URL of deployment: " + m_config.m_pathToDeployment);
            } else {
                CatalogAndIds catalogStuff = CatalogUtil.getCatalogFromZK(zk);
                deploymentBytes = catalogStuff.deploymentBytes;
            }
        } catch (KeeperException.NodeExistsException e) {
            CatalogAndIds catalogStuff = CatalogUtil.getCatalogFromZK(zk);
            byte[] deploymentBytesTemp = catalogStuff.deploymentBytes;
            if (deploymentBytesTemp != null) {
                // We will ignore the supplied or default deployment anyway.
                if (deploymentBytes != null && !m_config.m_deploymentDefault) {
                    byte[] deploymentHashHere = CatalogUtil.makeDeploymentHash(deploymentBytes);
                    if (!(Arrays.equals(deploymentHashHere, catalogStuff.getDeploymentHash()))) {
                        hostLog.warn("The locally provided deployment configuration did not " +
                                "match the configuration information found in the cluster.");
                    } else {
                        hostLog.info("Deployment configuration pulled from other cluster node.");
                    }
                }
                // Use the remote deployment that was obtained.
                deploymentBytes = deploymentBytesTemp;
            } else {
                hostLog.error("Deployment file could not be loaded locally or remotely, " +
                        "local supplied path: " + m_config.m_pathToDeployment);
                deploymentBytes = null;
            }
        } catch (KeeperException.NoNodeException e) {
            // The no-deploymentBytes case is handled below, so just log this error.
            if (hostLog.isDebugEnabled()) {
                hostLog.debug("Error trying to get deployment bytes from cluster", e);
            }
        }
        if (deploymentBytes == null) {
            hostLog.error("Deployment information could not be obtained from cluster node or locally");
            VoltDB.crashLocalVoltDB("No such deployment file: " + m_config.m_pathToDeployment, false, null);
        }
        if (deployment == null) {
            deployment = CatalogUtil.getDeployment(new ByteArrayInputStream(deploymentBytes));
        }
        // Wasn't a valid XML deployment file
        if (deployment == null) {
            hostLog.error("Not a valid XML deployment file at URL: " + m_config.m_pathToDeployment);
            VoltDB.crashLocalVoltDB("Not a valid XML deployment file at URL: " + m_config.m_pathToDeployment, false, null);
        }
        /*
         * Check for invalid deployment file settings (enterprise-only) in the community edition.
         * The trick here is to print out all applicable problems and then stop, rather than stopping
         * after the first one is found.
         */
        if (!m_config.m_isEnterprise) {
            boolean shutdownDeployment = false;
            boolean shutdownAction = false;
            // Check license features for the community version
            if ((deployment.getCluster() != null) && (deployment.getCluster().getKfactor() > 0)) {
                consoleLog.error("K-Safety is not supported in the community edition of VoltDB.");
                shutdownDeployment = true;
            }
            if ((deployment.getSnapshot() != null) && (deployment.getSnapshot().isEnabled())) {
                consoleLog.error("Snapshots are not supported in the community edition of VoltDB.");
                shutdownDeployment = true;
            }
            if ((deployment.getCommandlog() != null) && (deployment.getCommandlog().isEnabled())) {
                consoleLog.error("Command logging is not supported in the community edition of VoltDB.");
                shutdownDeployment = true;
            }
            if ((deployment.getExport() != null) && deployment.getExport().getConfiguration() != null
                    && !deployment.getExport().getConfiguration().isEmpty()) {
                consoleLog.error("Export is not supported in the community edition of VoltDB.");
                shutdownDeployment = true;
            }
            // Check the start action for the community edition
            if (m_config.m_startAction != StartAction.CREATE) {
                consoleLog.error("Start action \"" + m_config.m_startAction.getClass().getSimpleName() +
                        "\" is not supported in the community edition of VoltDB.");
                shutdownAction = true;
            }
            // If the process needs to stop, try to be helpful
            if (shutdownAction || shutdownDeployment) {
                String msg = "This process will exit. Please run VoltDB with ";
                if (shutdownDeployment) {
                    msg += "a deployment file compatible with the community edition";
                }
                if (shutdownDeployment && shutdownAction) {
                    msg += " and ";
                }
                if (shutdownAction && !shutdownDeployment) {
                    msg += "the CREATE start action";
                }
                msg += ".";
                VoltDB.crashLocalVoltDB(msg, false, null);
            }
        }
        // Note that heartbeats are specified in seconds in the XML, but in milliseconds internally
        HeartbeatType hbt = deployment.getHeartbeat();
        if (hbt != null) {
            m_config.m_deadHostTimeoutMS = hbt.getTimeout() * 1000;
            m_messenger.setDeadHostTimeout(m_config.m_deadHostTimeoutMS);
        } else {
            hostLog.info("Dead host timeout set to " + m_config.m_deadHostTimeoutMS + " milliseconds");
        }
        PartitionDetectionType pt = deployment.getPartitionDetection();
        if (pt != null) {
            m_config.m_partitionDetectionEnabled = pt.isEnabled();
            m_messenger.setPartitionDetectionEnabled(m_config.m_partitionDetectionEnabled);
        }
        // Get any consistency settings into config
        ConsistencyType consistencyType = deployment.getConsistency();
        if (consistencyType != null) {
            m_config.m_consistencyReadLevel = Consistency.ReadLevel.fromReadLevelType(consistencyType.getReadlevel());
        }
        final String elasticSetting = deployment.getCluster().getElastic().trim().toUpperCase();
        if (elasticSetting.equals("ENABLED")) {
            TheHashinator.setConfiguredHashinatorType(HashinatorType.ELASTIC);
        } else if (!elasticSetting.equals("DISABLED")) {
            VoltDB.crashLocalVoltDB("Error in deployment file, elastic attribute of cluster element must be " +
                    "'enabled' or 'disabled' but was '" + elasticSetting + "'", false, null);
        } else {
            TheHashinator.setConfiguredHashinatorType(HashinatorType.LEGACY);
        }
        // Log system setting information
        SystemSettingsType sysType = deployment.getSystemsettings();
        if (sysType != null) {
            if (sysType.getElastic() != null) {
                hostLog.info("Elastic duration set to " + sysType.getElastic().getDuration() + " milliseconds");
                hostLog.info("Elastic throughput set to " + sysType.getElastic().getThroughput() + " mb/s");
            }
            if (sysType.getTemptables() != null) {
                hostLog.info("Max temptable size set to " + sysType.getTemptables().getMaxsize() + " mb");
            }
            if (sysType.getSnapshot() != null) {
                hostLog.info("Snapshot priority set to " + sysType.getSnapshot().getPriority() + " [0 - 10]");
            }
            if (sysType.getQuery() != null) {
                if (sysType.getQuery().getTimeout() > 0) {
                    hostLog.info("Query timeout set to " + sysType.getQuery().getTimeout() + " milliseconds");
                    m_config.m_queryTimeout = sysType.getQuery().getTimeout();
                } else if (sysType.getQuery().getTimeout() == 0) {
                    hostLog.info("Query timeout set to unlimited");
                    m_config.m_queryTimeout = 0;
                }
            }
        }
        // Log a warning on the console log if security is turned off, as with the durability warning.
        SecurityType securityType = deployment.getSecurity();
        if (securityType == null || !securityType.isEnabled()) {
            consoleLog.warn(SECURITY_OFF_WARNING);
        }
        // Create a dummy catalog to load deployment info into
        Catalog catalog = new Catalog();
        // Need these in the dummy catalog
        Cluster cluster = catalog.getClusters().add("cluster");
        cluster.getDatabases().add("database");
        String result = CatalogUtil.compileDeployment(catalog, deployment, true);
        if (result != null) {
            // Any other non-enterprise deployment errors will be caught and handled here
            // (such as <= 0 host count)
            VoltDB.crashLocalVoltDB(result);
        }
        m_catalogContext = new CatalogContext(
                TxnEgo.makeZero(MpInitiator.MP_INIT_PID).getTxnId(),  // txnid
                0,                                                    // timestamp
                catalog,
                new DbSettings(m_clusterSettings, m_nodeSettings),
                new byte[] {},
                null,
                deploymentBytes,
                0,
                m_messenger);
        return ((deployment.getCommandlog() != null) && (deployment.getCommandlog().isEnabled()));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
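The dummy-catalog pattern near the end of the method is worth isolating: deployment settings can be validated without a real application catalog by compiling them into an empty catalog that contains only a cluster and a database. A condensed sketch under that assumption (the helper name validateDeploymentOnly is hypothetical):

static String validateDeploymentOnly(DeploymentType deployment) {
    // compileDeployment() expects the catalog to already contain a cluster and a database.
    Catalog catalog = new Catalog();
    Cluster cluster = catalog.getClusters().add("cluster");
    cluster.getDatabases().add("database");
    // Returns null when the deployment is valid, otherwise an error message suitable for logging.
    return CatalogUtil.compileDeployment(catalog, deployment, true);
}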
use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From the class TPCCProjectBuilder, the method createTPCCSchemaCatalog:
/**
 * Get a pointer to a compiled catalog for TPCC with all the procedures.
 */
public Catalog createTPCCSchemaCatalog() throws IOException {
    // compile a catalog
    String testDir = BuildDirectoryUtils.getBuildDirectoryPath();
    String catalogJar = testDir + File.separator + "tpcc-jni.jar";
    addDefaultSchema();
    addDefaultPartitioning();
    addDefaultProcedures();
    Catalog catalog = compile(catalogJar, 1, 1, 0, null);
    assert (catalog != null);
    return catalog;
}
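A hedged usage sketch of the builder from a test (the WAREHOUSE lookup assumes the default TPC-C schema includes that standard table; it is illustrative, not taken from this file):

TPCCProjectBuilder builder = new TPCCProjectBuilder();
Catalog tpccCatalog = builder.createTPCCSchemaCatalog();
// Catalog lookups go through the "cluster" and "database" entries, as in the tests above.
Database db = tpccCatalog.getClusters().get("cluster").getDatabases().get("database");
assertNotNull("Expected the standard TPC-C WAREHOUSE table", db.getTables().get("WAREHOUSE"));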
use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From the class TestLiveDDLCompiler, the method testMultiplePartitionStatements:
// Test that multiple partition statements are accepted and that
// the final result is the final requested partitioning
public void testMultiplePartitionStatements() throws Exception {
    File jarOut = new File("partitionfun.jar");
    jarOut.deleteOnExit();
    String schema =
            "CREATE TABLE T (C1 INTEGER NOT NULL, C2 INTEGER NOT NULL, C3 INTEGER NOT NULL);\n" +
            "PARTITION TABLE T ON COLUMN C1;\n" +
            "PARTITION TABLE T ON COLUMN C2;\n";
    File schemaFile = VoltProjectBuilder.writeStringToTempFile(schema);
    String schemaPath = schemaFile.getPath();
    VoltCompiler compiler = new VoltCompiler(false);
    boolean success = compiler.compileFromDDL(jarOut.getPath(), schemaPath);
    assertTrue("Compilation failed unexpectedly", success);

    Catalog catalog = new Catalog();
    catalog.execute(CatalogUtil.getSerializedCatalogStringFromJar(
            CatalogUtil.loadAndUpgradeCatalogFromJar(
                    MiscUtils.fileToBytes(new File(jarOut.getPath())), false).getFirst()));
    Database db = catalog.getClusters().get("cluster").getDatabases().get("database");
    Table t = db.getTables().get("T");
    assertEquals("C2", t.getPartitioncolumn().getTypeName());
}
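Both this test and testCompileDeploymentAddUserToNonExistentGroup rebuild an in-memory Catalog from a compiled jar the same way. A small helper distilled from them (the name loadCatalogFromJar is hypothetical):

static Catalog loadCatalogFromJar(File catalogJarFile) throws IOException {
    byte[] jarBytes = MiscUtils.fileToBytes(catalogJarFile);
    String serialized = CatalogUtil.getSerializedCatalogStringFromJar(
            CatalogUtil.loadAndUpgradeCatalogFromJar(jarBytes, false).getFirst());
    Catalog catalog = new Catalog();
    // Catalog.execute() replays the serialized catalog command stream into the new instance.
    catalog.execute(serialized);
    return catalog;
}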