Use of org.apache.phoenix.pherf.workload.WriteWorkload in project phoenix by apache.
From the class RuleGeneratorTest, method testSequentialDataSequence:
@Test
public void testSequentialDataSequence() throws Exception {
    XMLConfigParser parser = new XMLConfigParser(matcherScenario);
    DataModel model = parser.getDataModels().get(0);
    WriteWorkload loader = new WriteWorkload(parser);
    RulesApplier rulesApplier = loader.getRulesApplier();
    // Find the first column whose rule uses a SEQUENTIAL data sequence.
    Column targetColumn = null;
    for (Column column : model.getDataMappingColumns()) {
        DataSequence sequence = column.getDataSequence();
        if (sequence == DataSequence.SEQUENTIAL) {
            targetColumn = column;
            break;
        }
    }
    assertNotNull("Could not find a DataSequence.SEQUENTIAL rule.", targetColumn);
    assertMultiThreadedIncrementValue(targetColumn, rulesApplier);
}
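The assertMultiThreadedIncrementValue helper is defined elsewhere in RuleGeneratorTest and is not shown on this page. A minimal sketch of the idea, assuming getDataForRule is safe to call concurrently and that a SEQUENTIAL rule never hands out the same value twice; the scenario parameter is added here so the sketch can use getDataForRule, the only lookup confirmed on this page, whereas the real helper takes just the column and the applier:

// Hypothetical sketch, inside RuleGeneratorTest; needs java.util.*,
// java.util.concurrent.* and the static JUnit asserts.
private void assertMultiThreadedIncrementValue(Scenario scenario, Column column,
        RulesApplier rulesApplier) throws Exception {
    final int threads = 4;
    final int drawsPerThread = 100;
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    // Every generated value lands in one shared set; a failed insert means
    // the sequence handed out the same value twice.
    Set<String> seen = ConcurrentHashMap.newKeySet();
    List<Future<Void>> futures = new ArrayList<>();
    for (int t = 0; t < threads; t++) {
        futures.add(pool.submit(() -> {
            for (int i = 0; i < drawsPerThread; i++) {
                DataValue value = rulesApplier.getDataForRule(scenario, column);
                assertTrue("Duplicate sequential value: " + value.getValue(),
                        seen.add(value.getValue()));
            }
            return null;
        }));
    }
    for (Future<Void> future : futures) {
        // get() surfaces any failure from a worker thread (wrapped in an ExecutionException)
        future.get();
    }
    pool.shutdown();
}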
Use of org.apache.phoenix.pherf.workload.WriteWorkload in project phoenix by apache.
From the class RuleGeneratorTest, method testValueListRule:
@Test
public void testValueListRule() throws Exception {
    List<String> expectedValues = new ArrayList<>();
    expectedValues.add("aAAyYhnNbBs9kWk");
    expectedValues.add("bBByYhnNbBs9kWu");
    expectedValues.add("cCCyYhnNbBs9kWr");
    XMLConfigParser parser = new XMLConfigParser(matcherScenario);
    WriteWorkload loader = new WriteWorkload(parser);
    RulesApplier rulesApplier = loader.getRulesApplier();
    Scenario scenario = parser.getScenarios().get(0);
    Column simPhxCol = new Column();
    simPhxCol.setName("PARENT_ID");
    simPhxCol.setType(DataTypeMapping.CHAR);
    // Running this 10 times gives a reasonable chance that every value in
    // the list will appear at least once.
    for (int i = 0; i < 10; i++) {
        DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol);
        assertTrue("Got a value not in the list for the rule: " + value.getValue(),
                expectedValues.contains(value.getValue()));
    }
}
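Ten draws only make it likely that each value appears; a variant that tallies draw counts makes the membership check stronger and, if the rule assigns distribution weights, lets you eyeball whether the weights are roughly honored. A sketch reusing the test's own variables (the 1000-draw count is arbitrary; needs java.util.Map and java.util.HashMap):

// Tally how often each list value comes back over many draws.
Map<String, Integer> counts = new HashMap<>();
for (int i = 0; i < 1000; i++) {
    DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol);
    counts.merge(value.getValue(), 1, Integer::sum);
}
for (String expected : expectedValues) {
    assertTrue("Value never drawn: " + expected, counts.getOrDefault(expected, 0) > 0);
}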
Use of org.apache.phoenix.pherf.workload.WriteWorkload in project phoenix by apache.
From the class RuleGeneratorTest, method testRuleOverrides:
@Test
public void testRuleOverrides() throws Exception {
    XMLConfigParser parser = new XMLConfigParser(matcherScenario);
    WriteWorkload loader = new WriteWorkload(parser);
    RulesApplier rulesApplier = loader.getRulesApplier();
    Scenario scenario = parser.getScenarios().get(0);
    // We should be able to find the correct rule based on the Type and Name
    // combination alone.
    // Test CHAR
    Column simPhxCol = new Column();
    simPhxCol.setName("OTHER_ID");
    simPhxCol.setType(DataTypeMapping.CHAR);
    // Get the rule we expect to match
    Column rule = rulesApplier.getRule(simPhxCol);
    assertEquals("Did not find the correct rule.", simPhxCol.getName(), rule.getName());
    assertEquals("Did not find the matching rule type.", simPhxCol.getType(), rule.getType());
    assertEquals("Rule contains incorrect length.", 8, rule.getLength());
    assertEquals("Rule contains incorrect prefix.", "z0Oxx00", rule.getPrefix());
    DataValue value = rulesApplier.getDataForRule(scenario, simPhxCol);
    assertEquals("Value returned does not match rule.", 8, value.getValue().length());
    // Test VARCHAR with RANDOM data sequence and a prefix
    simPhxCol.setName("OLDVAL_STRING");
    simPhxCol.setType(DataTypeMapping.VARCHAR);
    // Get the rule we expect to match
    rule = rulesApplier.getRule(simPhxCol);
    assertEquals("Did not find the correct rule.", simPhxCol.getName(), rule.getName());
    assertEquals("Did not find the matching rule type.", simPhxCol.getType(), rule.getType());
    assertEquals("Rule contains incorrect length.", 10, rule.getLength());
    assertEquals("Rule contains incorrect prefix.", "MYPRFX", rule.getPrefix());
    value = rulesApplier.getDataForRule(scenario, simPhxCol);
    assertEquals("Value returned does not match rule.", 10, value.getValue().length());
    assertTrue("Value returned does not start with the prefix.",
            StringUtils.startsWith(value.getValue(), rule.getPrefix()));
}
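The CHAR and VARCHAR branches above repeat the same assertions. A hypothetical helper (not part of the real test) that collapses them, assuming generated values always start with the rule's prefix, which only the VARCHAR branch checks explicitly:

private void assertRuleMatches(RulesApplier rulesApplier, Scenario scenario, Column col,
        int expectedLength, String expectedPrefix) throws Exception {
    Column rule = rulesApplier.getRule(col);
    assertEquals("Did not find the correct rule.", col.getName(), rule.getName());
    assertEquals("Did not find the matching rule type.", col.getType(), rule.getType());
    assertEquals("Rule contains incorrect length.", expectedLength, rule.getLength());
    assertEquals("Rule contains incorrect prefix.", expectedPrefix, rule.getPrefix());
    DataValue value = rulesApplier.getDataForRule(scenario, col);
    assertEquals("Value returned does not match rule.", expectedLength, value.getValue().length());
    assertTrue("Value returned does not start with the prefix.",
            value.getValue().startsWith(expectedPrefix));
}

With it, each branch shrinks to a setName/setType pair plus a single call, e.g. assertRuleMatches(rulesApplier, scenario, simPhxCol, 10, "MYPRFX").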
Use of org.apache.phoenix.pherf.workload.WriteWorkload in project phoenix by apache.
From the class DataIngestIT, method testColumnRulesApplied:
@Test
public void testColumnRulesApplied() {
    Scenario scenario = null;
    try {
        scenario = parser.getScenarioByName("testScenario");
        List<Column> columnListFromPhoenix = util.getColumnsFromPhoenix(scenario.getSchemaName(),
                scenario.getTableNameWithoutSchemaName(), util.getConnection());
        assertTrue("Could not get Phoenix columns.", columnListFromPhoenix.size() > 0);
        WriteWorkload loader = new WriteWorkload(util, parser, scenario, GeneratePhoenixStats.NO);
        WorkloadExecutor executor = new WorkloadExecutor();
        executor.add(loader);
        executor.get();
        executor.shutdown();
        RulesApplier rulesApplier = loader.getRulesApplier();
        List<Map> modelList = rulesApplier.getModelList();
        assertTrue("Could not generate the modelList.", modelList.size() > 0);
        for (Column column : columnListFromPhoenix) {
            DataValue data = rulesApplier.getDataForRule(scenario, column);
            // We are generating data values, so the value should have been
            // specified by this point.
            assertNotNull("Failed to retrieve data for column type: " + column.getType(), data);
            // NEWVAL_STRING defines no override, so we should get the default rule.
            if ((column.getType() == DataTypeMapping.VARCHAR) && (column.getName().equals("NEWVAL_STRING"))) {
                assertTrue("Expected the default distribution for NEWVAL_STRING.",
                        data.getDistribution() == Integer.MIN_VALUE);
            }
        }
        // Run some queries
        executor = new WorkloadExecutor();
        Workload query = new QueryExecutor(parser, util, executor);
        executor.add(query);
        executor.get();
        executor.shutdown();
        PhoenixUtil.create().deleteTables("ALL");
    } catch (Exception e) {
        fail("We had an exception: " + e.getMessage());
    }
}
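The add / get / shutdown sequence around WriteWorkload above is the standard Pherf workload lifecycle, and the same pattern appears in Pherf.run below. Distilled to its core (the scenario-file name is hypothetical; all class names and calls are taken from the snippets on this page):

WorkloadExecutor executor = new WorkloadExecutor();
Workload load = new WriteWorkload(new XMLConfigParser("my_scenario.xml"));
try {
    executor.add(load);     // schedule the workload
    executor.get();         // block until it finishes
} finally {
    load.complete();        // let the workload release its resources
    executor.shutdown();    // stop the executor's thread pool
}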
Use of org.apache.phoenix.pherf.workload.WriteWorkload in project phoenix by apache.
From the class Pherf, method run:
public void run() throws Exception {
    MonitorManager monitorManager = null;
    List<Workload> workloads = new ArrayList<>();
    WorkloadExecutor workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
    try {
        // List the bundled schema and scenario files, then exit
        if (listFiles) {
            ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL);
            Collection<Path> schemaFiles = list.getResourceList(PherfConstants.SCHEMA_ROOT_PATTERN + ".sql");
            System.out.println("Schema Files:");
            for (Path path : schemaFiles) {
                System.out.println(path);
            }
            list = new ResourceList(PherfConstants.RESOURCE_SCENARIO);
            Collection<Path> scenarioFiles = list.getResourceList(PherfConstants.SCENARIO_ROOT_PATTERN + ".xml");
            System.out.println("Scenario Files:");
            for (Path path : scenarioFiles) {
                System.out.println(path);
            }
            return;
        }
        // Compare results and exit
        if (null != compareResults) {
            logger.info("\nStarting to compare results and exiting for " + compareResults);
            new GoogleChartGenerator(compareResults, compareType).readAndRender();
            return;
        }
        XMLConfigParser parser = new XMLConfigParser(scenarioFile);
        // Drop tables with the PHERF schema name that match the given regex
        if (null != dropPherfTablesRegEx) {
            logger.info("\nDropping existing tables with the PHERF schema name matching the " + dropPherfTablesRegEx + " regex expression.");
            phoenixUtil.deleteTables(dropPherfTablesRegEx);
        }
        if (monitor) {
            monitorManager = new MonitorManager(Integer.parseInt(properties.getProperty("pherf.default.monitorFrequency")));
            workloadExecutor.add(monitorManager);
        }
        if (applySchema) {
            logger.info("\nStarting to apply schema...");
            SchemaReader reader = (schemaFile == null) ? new SchemaReader(".*.sql") : new SchemaReader(schemaFile);
            reader.applySchema();
        }
        // Schema and data load
        if (preLoadData) {
            logger.info("\nStarting data load...");
            Workload workload = new WriteWorkload(parser, generateStatistics);
            try {
                workloadExecutor.add(workload);
                // Wait for the data load to complete
                workloadExecutor.get(workload);
            } finally {
                if (null != workload) {
                    workload.complete();
                }
            }
        } else {
            logger.info("\nSKIPPED: data load and schema creation because the -l argument was not specified");
        }
        // Execute multi-threaded query sets
        if (executeQuerySets) {
            logger.info("\nStarting to execute query sets...");
            workloadExecutor.add(new QueryExecutor(parser, phoenixUtil, workloadExecutor, parser.getDataModels(), queryHint, isFunctional, writeRuntimeResults));
        } else {
            logger.info("\nSKIPPED: multi-threaded query set execution because the -q argument was not specified");
        }
        // Clean up the monitor explicitly
        if (monitorManager != null) {
            logger.info("Run completed. Shutting down Monitor.");
            monitorManager.complete();
        }
        // Collect any final jobs
        workloadExecutor.get();
    } finally {
        if (workloadExecutor != null) {
            logger.info("Run completed. Shutting down thread pool.");
            workloadExecutor.shutdown();
        }
    }
}
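One fragile spot in run(): Integer.parseInt(properties.getProperty("pherf.default.monitorFrequency")) throws a NumberFormatException if the property is absent, because parseInt is handed null. A defensive variant using the two-argument overload of Properties.getProperty (the 5000 ms fallback is a made-up default, not Pherf's):

// Fall back to a hypothetical default when the property is missing.
String monitorFrequency = properties.getProperty("pherf.default.monitorFrequency", "5000");
monitorManager = new MonitorManager(Integer.parseInt(monitorFrequency));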