use of org.voltdb.catalog.Cluster in project voltdb by VoltDB.
Example from class ExportManager, method updateCatalog.
/**
 * Applies a catalog update to the export subsystem. Refreshes the processor
 * configuration from the new catalog's connectors and, when the update rolled a
 * new generation in the EE, creates the matching {@link ExportGeneration} here.
 *
 * @param catalogContext the freshly updated catalog context
 * @param requireCatalogDiffCmdsApplyToEE true if the catalog diff commands were applied in the EE
 * @param requiresNewExportGeneration true if this update included stream-related changes
 * @param partitions the partitions hosted locally
 */
public synchronized void updateCatalog(CatalogContext catalogContext, boolean requireCatalogDiffCmdsApplyToEE, boolean requiresNewExportGeneration, List<Integer> partitions) {
    final Cluster cluster = catalogContext.catalog.getClusters().get("cluster");
    final Database db = cluster.getDatabases().get("database");
    final CatalogMap<Connector> connectors = db.getConnectors();
    updateProcessorConfig(connectors);
    if (m_processorConfig.isEmpty()) {
        // No export targets configured: remember this as the latest generation with export disabled.
        m_lastNonEnabledGeneration = catalogContext.m_uniqueId;
        return;
    }
    if (!requiresNewExportGeneration) {
        exportLog.info("Skipped rolling generations as no stream related changes happened during this update.");
        return;
    }
    /**
     * This checks whether the catalog update was applied in the EE or not. If the catalog
     * update is skipped (for @UpdateClasses and the like), the EE does not roll to a new
     * generation, so we must not create a new generation roll here either; we keep the
     * current generation. If anything changes in getDiffCommandsForEE, or the design
     * changes, pay attention to fix this.
     */
    if (!requireCatalogDiffCmdsApplyToEE) {
        exportLog.info("Skipped rolling generations as generation not created in EE.");
        return;
    }
    File exportOverflowDirectory = new File(VoltDB.instance().getExportOverflowPath());
    try {
        // Roll a new generation keyed by the update's unique id, mirroring the EE's roll.
        ExportGeneration newGeneration = new ExportGeneration(catalogContext.m_uniqueId, exportOverflowDirectory, false);
        newGeneration.setGenerationDrainRunnable(new GenerationDrainRunnable(newGeneration));
        newGeneration.initializeGenerationFromCatalog(connectors, m_hostId, m_messenger, partitions);
        m_generations.put(catalogContext.m_uniqueId, newGeneration);
    } catch (IOException e1) {
        // Unrecoverable: the export overflow directory is unusable.
        VoltDB.crashLocalVoltDB("Error processing catalog update in export system", true, e1);
    }
    /*
     * If there is no existing export processor, create an initial one.
     * This occurs when export is turned on/off at runtime.
     */
    if (m_processor.get() == null) {
        exportLog.info("First stream created processor will be initialized: " + m_loaderClass);
        createInitialExportProcessor(catalogContext, connectors, false, partitions, false);
    }
}
use of org.voltdb.catalog.Cluster in project voltdb by VoltDB.
Example from class CatalogUtil, method setDrInfo.
/**
 * Applies the deployment file's DR (database replication) settings to the catalog:
 * producer/consumer enablement, role, ports, flush interval, connection source,
 * and the cluster id (which may come from either the cluster tag or the
 * deprecated DR tag, but not conflicting values of both).
 *
 * @param catalog the catalog to update
 * @param dr the {@code <dr>} element of the deployment file, or null if absent
 * @param clusterType the {@code <cluster>} element of the deployment file
 */
private static void setDrInfo(Catalog catalog, DrType dr, ClusterType clusterType) {
    int clusterId;
    Cluster cluster = catalog.getClusters().get("cluster");
    // Assert before the first dereference; previously this came after
    // cluster.getDatabases() and could never fire.
    assert cluster != null;
    final Database db = cluster.getDatabases().get("database");
    if (dr != null) {
        ConnectionType drConnection = dr.getConnection();
        cluster.setDrproducerenabled(dr.isListen());
        cluster.setDrproducerport(dr.getPort());
        cluster.setDrrole(dr.getRole().name().toLowerCase());
        if (dr.getRole() == DrRoleType.XDCR) {
            // Setting this for compatibility mode only, don't use in new code
            db.setIsactiveactivedred(true);
        }
        // Backward compatibility to support cluster id in DR tag
        if (clusterType.getId() == null && dr.getId() != null) {
            clusterId = dr.getId();
        } else if (clusterType.getId() != null && dr.getId() == null) {
            clusterId = clusterType.getId();
        } else if (clusterType.getId() == null && dr.getId() == null) {
            clusterId = 0;
        } else {
            // Both ids are present: compare boxed Integers with equals(), not ==.
            // Identity comparison is only reliable for values in the Integer cache
            // (-128..127) and silently mis-compares larger cluster ids.
            if (clusterType.getId().equals(dr.getId())) {
                clusterId = clusterType.getId();
            } else {
                throw new RuntimeException("Detected two conflicting cluster ids in deployment file, setting cluster id in DR tag is " + "deprecated, please remove");
            }
        }
        cluster.setDrflushinterval(dr.getFlushInterval());
        if (drConnection != null) {
            String drSource = drConnection.getSource();
            cluster.setDrmasterhost(drSource);
            cluster.setDrconsumerenabled(drConnection.isEnabled());
            if (drConnection.getPreferredSource() != null) {
                cluster.setPreferredsource(drConnection.getPreferredSource());
            } else {
                // reset to -1, if this is an update catalog
                cluster.setPreferredsource(-1);
            }
            hostLog.info("Configured connection for DR replica role to host " + drSource);
        } else {
            if (dr.getRole() == DrRoleType.XDCR) {
                // consumer should be enabled even without connection source for XDCR
                cluster.setDrconsumerenabled(true);
                // reset to -1, if this is an update catalog
                cluster.setPreferredsource(-1);
            }
        }
    } else {
        // No <dr> element: DR is off; the cluster id may still come from the cluster tag.
        cluster.setDrrole(DrRoleType.NONE.value());
        if (clusterType.getId() != null) {
            clusterId = clusterType.getId();
        } else {
            clusterId = 0;
        }
    }
    cluster.setDrclusterid(clusterId);
}
use of org.voltdb.catalog.Cluster in project voltdb by VoltDB.
Example from class CatalogUtil, method setExportInfo.
/**
 * Set deployment time settings for export: validates per-target connector
 * configuration, enforces the optional row length limit, and copies the
 * processor properties into the catalog connector entries.
 *
 * @param catalog The catalog to be updated.
 * @param exportType A reference to the {@code <exports>} element of the deployment.xml file.
 */
private static void setExportInfo(Catalog catalog, ExportType exportType) {
    final Cluster cluster = catalog.getClusters().get("cluster");
    Database db = cluster.getDatabases().get("database");
    if (DrRoleType.XDCR.value().equals(cluster.getDrrole())) {
        // add default export configuration to DR conflict table
        exportType = addExportConfigToDRConflictsTable(catalog, exportType);
    }
    if (exportType == null) {
        return;
    }
    List<String> targetList = new ArrayList<>();
    for (ExportConfigurationType exportConfiguration : exportType.getConfiguration()) {
        boolean connectorEnabled = exportConfiguration.isEnabled();
        String targetName = exportConfiguration.getTarget();
        if (connectorEnabled) {
            m_exportEnabled = true;
            // Each export target may be driven by at most one enabled connector.
            if (targetList.contains(targetName)) {
                throw new RuntimeException("Multiple connectors can not be assigned to single export target: " + targetName + ".");
            } else {
                targetList.add(targetName);
            }
        }
        Properties processorProperties = checkExportProcessorConfiguration(exportConfiguration);
        org.voltdb.catalog.Connector catconn = db.getConnectors().get(targetName);
        if (catconn == null) {
            // Deployment configures a target that no table streams to; warn only if enabled.
            if (connectorEnabled) {
                hostLog.info("Export configuration enabled and provided for export target " + targetName + " in deployment file however no export " + "tables are assigned to this target. " + "Export target " + targetName + " will be disabled.");
            }
            continue;
        }
        // checking rowLengthLimit (0 means unlimited)
        int rowLengthLimit = Integer.parseInt(processorProperties.getProperty(ROW_LENGTH_LIMIT, "0"));
        if (rowLengthLimit > 0) {
            // Loop-invariant: internal metadata fields count toward the row length
            // unless "skipinternals" is set; compute this once, not per table.
            final int baseRowLength = Boolean.parseBoolean(processorProperties.getProperty("skipinternals", "false")) ? 0 : EXPORT_INTERNAL_FIELD_Length;
            for (ConnectorTableInfo catTableinfo : catconn.getTableinfo()) {
                Table tableref = catTableinfo.getTable();
                int rowLength = baseRowLength;
                for (Column catColumn : tableref.getColumns()) {
                    rowLength += catColumn.getSize();
                }
                if (rowLength > rowLengthLimit) {
                    hostLog.error("Export configuration for export target " + targetName + " is " + "configured to have row length limit " + rowLengthLimit + ". But the export table " + tableref.getTypeName() + " has estimated row length " + rowLength + ".");
                    throw new RuntimeException("Export table " + tableref.getTypeName() + " row length is " + rowLength + ", exceeding configured limitation " + rowLengthLimit + ".");
                }
            }
        }
        // Copy every processor property into the catalog connector config.
        for (String name : processorProperties.stringPropertyNames()) {
            ConnectorProperty prop = catconn.getConfig().add(name);
            prop.setName(name);
            prop.setValue(processorProperties.getProperty(name));
        }
        // on-server export always uses the guest processor
        catconn.setLoaderclass(ExportManager.PROCESSOR_CLASS);
        catconn.setEnabled(connectorEnabled);
        if (!connectorEnabled) {
            hostLog.info("Export configuration for export target " + targetName + " is present and is " + "configured to be disabled. Export target " + targetName + " will be disabled.");
        } else {
            hostLog.info("Export target " + targetName + " is configured and enabled with type=" + exportConfiguration.getType());
            if (exportConfiguration.getProperty() != null) {
                hostLog.info("Export target " + targetName + " configuration properties are: ");
                for (PropertyType configProp : exportConfiguration.getProperty()) {
                    // Never log password-like properties.
                    if (!configProp.getName().toLowerCase().contains("password")) {
                        hostLog.info("Export Configuration Property NAME=" + configProp.getName() + " VALUE=" + configProp.getValue());
                    }
                }
            }
        }
    }
}
use of org.voltdb.catalog.Cluster in project voltdb by VoltDB.
Example from class TestCatalogUtil, method testIv2PartitionDetectionSettings.
// I'm not testing the legacy behavior here, just IV2
public void testIv2PartitionDetectionSettings() throws Exception {
    // Table of deployment XML variants and the networkpartition flag each should produce:
    // no <partition-detection> element defaults to enabled; explicit enabled='true'
    // stays enabled; explicit enabled='false' disables it.
    final String[] deployments = {
        "<?xml version='1.0' encoding='UTF-8' standalone='no'?>" + "<deployment>" + " <cluster hostcount='3' kfactor='1' sitesperhost='2'/>" + "</deployment>",
        "<?xml version='1.0' encoding='UTF-8' standalone='no'?>" + "<deployment>" + " <cluster hostcount='3' kfactor='1' sitesperhost='2'/>" + " <partition-detection enabled='true' />" + "</deployment>",
        "<?xml version='1.0' encoding='UTF-8' standalone='no'?>" + "<deployment>" + " <cluster hostcount='3' kfactor='1' sitesperhost='2'/>" + " <partition-detection enabled='false' />" + "</deployment>"
    };
    final boolean[] expectDetectionOn = { true, true, false };
    for (int i = 0; i < deployments.length; i++) {
        if (i > 0) {
            // Fresh catalog for each case after the first.
            setUp();
        }
        final File deploymentFile = VoltProjectBuilder.writeStringToTempFile(deployments[i]);
        String msg = CatalogUtil.compileDeployment(catalog, deploymentFile.getPath(), false);
        assertTrue("Deployment file failed to parse: " + msg, msg == null);
        Cluster cluster = catalog.getClusters().get("cluster");
        assertEquals(expectDetectionOn[i], cluster.getNetworkpartition());
    }
}
use of org.voltdb.catalog.Cluster in project voltdb by VoltDB.
Example from class TestCatalogUtil, method testSecurityProvider.
public void testSecurityProvider() throws Exception {
    // The two deployments differ only in the <security> element: default provider
    // (expected "hash") versus explicit provider="kerberos".
    final String commonHead = "<?xml version='1.0' encoding='UTF-8' standalone='no'?>" + "<deployment>" + " <cluster hostcount='3' kfactor='1' sitesperhost='2'/>" + " <paths><voltdbroot path=\"/tmp/" + System.getProperty("user.name") + "\" /></paths>";
    final String commonTail = " <users>" + " <user name=\"joe\" password=\"aaa\" roles=\"administrator\"/>" + " </users>" + "</deployment>";
    final String[] deployments = {
        commonHead + " <security enabled=\"true\"/>" + commonTail,
        commonHead + " <security enabled=\"true\" provider=\"kerberos\"/>" + commonTail
    };
    final String[] expectedProviders = { "hash", "kerberos" };
    for (int i = 0; i < deployments.length; i++) {
        if (i > 0) {
            // Fresh catalog for the second case.
            setUp();
        }
        final File deploymentFile = VoltProjectBuilder.writeStringToTempFile(deployments[i]);
        CatalogUtil.compileDeployment(catalog, deploymentFile.getPath(), false);
        Cluster cluster = catalog.getClusters().get("cluster");
        Database db = cluster.getDatabases().get("database");
        assertTrue(cluster.getSecurityenabled());
        assertEquals(expectedProviders[i], db.getSecurityprovider());
    }
}
Aggregations