use of java.util.LinkedHashSet in project hadoop by apache.
the class TestZKRMStateStorePerf method run.
@SuppressWarnings("unchecked")
@Override
public int run(String[] args) {
LOG.info("Starting ZKRMStateStorePerf ver." + version);
int numApp = ZK_PERF_NUM_APP_DEFAULT;
int numAppAttemptPerApp = ZK_PERF_NUM_APPATTEMPT_PER_APP;
String hostPort = null;
boolean launchLocalZK = true;
if (args.length == 0) {
System.err.println("Missing arguments.");
return -1;
}
for (int i = 0; i < args.length; i++) {
// parse command line
if (args[i].equalsIgnoreCase("-appsize")) {
numApp = Integer.parseInt(args[++i]);
} else if (args[i].equalsIgnoreCase("-appattemptsize")) {
numAppAttemptPerApp = Integer.parseInt(args[++i]);
} else if (args[i].equalsIgnoreCase("-hostPort")) {
hostPort = args[++i];
launchLocalZK = false;
} else if (args[i].equalsIgnoreCase("-workingZnode")) {
workingZnode = args[++i];
} else {
System.err.println("Illegal argument: " + args[i]);
return -1;
}
}
if (launchLocalZK) {
try {
setUpZKServer();
} catch (Exception e) {
System.err.println("failed to setup. : " + e.getMessage());
return -1;
}
}
initStore(hostPort);
long submitTime = System.currentTimeMillis();
long startTime = System.currentTimeMillis() + 1234;
ArrayList<ApplicationId> applicationIds = new ArrayList<>();
ArrayList<RMApp> rmApps = new ArrayList<>();
ArrayList<ApplicationAttemptId> attemptIds = new ArrayList<>();
HashMap<ApplicationId, Set<ApplicationAttemptId>> appIdsToAttemptId = new HashMap<>();
TestDispatcher dispatcher = new TestDispatcher();
store.setRMDispatcher(dispatcher);
for (int i = 0; i < numApp; i++) {
ApplicationId appId = ApplicationId.newInstance(clusterTimeStamp, i);
applicationIds.add(appId);
ArrayList<ApplicationAttemptId> attemptIdsForThisApp = new ArrayList<>();
for (int j = 0; j < numAppAttemptPerApp; j++) {
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, j);
attemptIdsForThisApp.add(attemptId);
}
appIdsToAttemptId.put(appId, new LinkedHashSet(attemptIdsForThisApp));
attemptIds.addAll(attemptIdsForThisApp);
}
for (ApplicationId appId : applicationIds) {
RMApp app = null;
try {
app = storeApp(store, appId, submitTime, startTime);
} catch (Exception e) {
System.err.println("failed to create Application Znode. : " + e.getMessage());
return -1;
}
waitNotify(dispatcher);
rmApps.add(app);
}
for (ApplicationAttemptId attemptId : attemptIds) {
Token<AMRMTokenIdentifier> tokenId = generateAMRMToken(attemptId, appTokenMgr);
SecretKey clientTokenKey = clientToAMTokenMgr.createMasterKey(attemptId);
try {
storeAttempt(store, attemptId, ContainerId.newContainerId(attemptId, 0L).toString(), tokenId, clientTokenKey, dispatcher);
} catch (Exception e) {
System.err.println("failed to create AppAttempt Znode. : " + e.getMessage());
return -1;
}
}
long storeStart = System.currentTimeMillis();
try {
store.loadState();
} catch (Exception e) {
System.err.println("failed to locaState from ZKRMStateStore. : " + e.getMessage());
return -1;
}
long storeEnd = System.currentTimeMillis();
long loadTime = storeEnd - storeStart;
String resultMsg = "ZKRMStateStore takes " + loadTime + " msec to loadState.";
LOG.info(resultMsg);
System.out.println(resultMsg);
// cleanup
try {
for (RMApp app : rmApps) {
ApplicationStateData appState = ApplicationStateData.newInstance(app.getSubmitTime(), app.getStartTime(), app.getApplicationSubmissionContext(), app.getUser());
ApplicationId appId = app.getApplicationId();
Map m = mock(Map.class);
when(m.keySet()).thenReturn(appIdsToAttemptId.get(appId));
appState.attempts = m;
store.removeApplicationStateInternal(appState);
}
} catch (Exception e) {
System.err.println("failed to cleanup. : " + e.getMessage());
return -1;
}
return 0;
}
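A minimal, self-contained sketch of the ordering guarantee the cleanup phase above relies on: copying the per-application attempt list into a LinkedHashSet removes duplicates while keeping generation order. Plain strings stand in for ApplicationAttemptId, and the class name is made up for the example.

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class AttemptIdOrderSketch {
  public static void main(String[] args) {
    // Attempt ids are generated in order 0..n-1 for one application.
    List<String> attemptIdsForThisApp = new ArrayList<>();
    for (int j = 0; j < 3; j++) {
      attemptIdsForThisApp.add("appattempt_1_0001_00000" + j);
    }
    // Copying the list into a LinkedHashSet drops any duplicates while
    // keeping generation order, which is what the cleanup loop counts on
    // when it hands the set to the mocked Map.keySet().
    Set<String> attempts = new LinkedHashSet<>(attemptIdsForThisApp);
    // Prints the ids in the same order they were created.
    System.out.println(attempts);
  }
}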
use of java.util.LinkedHashSet in project hive by apache.
the class UnitTestPropertiesParser method generateFullTestSet.
private LinkedHashMap<String, LinkedHashSet<TestInfo>> generateFullTestSet(
    RootConfig rootConfig, Map<String, ModuleConfig> moduleConfigs,
    List<TestDir> unitTestDirs) throws IOException {
  LinkedHashMap<String, LinkedHashSet<TestInfo>> result = new LinkedHashMap<>();
  for (TestDir unitTestDir : unitTestDirs) {
    for (File classFile : fileListProvider.listFiles(unitTestDir.path,
        new String[] { "class" }, true)) {
      String className = classFile.getName();
      if (className.startsWith("Test") && !className.contains("$")) {
        String testName = className.replaceAll("\\.class$", "");
        String pathPrefix = getPathPrefix(classFile, rootConfig.subDirForPrefix);
        String moduleName = getModuleNameFromPathPrefix(pathPrefix);
        logger.debug("In {}, found class {} with pathPrefix={}, moduleName={}",
            unitTestDir.path, className, pathPrefix, moduleName);
        ModuleConfig moduleConfig = moduleConfigs.get(moduleName);
        if (moduleConfig == null) {
          moduleConfig = FAKE_MODULE_CONFIG;
        }
        TestInfo testInfo = checkAndGetTestInfo(moduleName, pathPrefix,
            testName, rootConfig, moduleConfig);
        if (testInfo != null) {
          logger.info("Adding test: " + testInfo);
          addTestToResult(result, testInfo);
        }
      } else {
        logger.trace("In {}, found class {} with pathPrefix={}. Not a test",
            unitTestDir.path, className);
      }
    }
  }
  return result;
}
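The method above funnels every discovered test into a LinkedHashMap of LinkedHashSets through addTestToResult (not shown here). A rough sketch of that grouping pattern, using plain strings instead of TestInfo and a made-up class name; it assumes nothing about the real helper beyond what the result type implies.

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;

public class TestGroupingSketch {
  public static void main(String[] args) {
    // module -> ordered, de-duplicated set of test class names
    LinkedHashMap<String, LinkedHashSet<String>> result = new LinkedHashMap<>();
    String[][] discovered = {
        {"ql", "TestParser"},
        {"ql", "TestParser"},        // duplicate class file, dropped by the set
        {"metastore", "TestSchema"}
    };
    for (String[] entry : discovered) {
      // LinkedHashMap keeps module discovery order; LinkedHashSet keeps
      // per-module test order and silently drops repeated classes.
      result.computeIfAbsent(entry[0], k -> new LinkedHashSet<>())
            .add(entry[1]);
    }
    // {ql=[TestParser], metastore=[TestSchema]} -- discovery order preserved
    System.out.println(result);
  }
}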
use of java.util.LinkedHashSet in project kafka by apache.
the class ConnectorConfig method enrich.
/**
* Returns an enriched {@link ConfigDef} building upon the {@code ConfigDef}, using the current configuration specified in {@code props} as an input.
* <p>
* {@code requireFullConfig} specifies whether required config values that are missing should cause an exception to be thrown.
*/
public static ConfigDef enrich(ConfigDef baseConfigDef, Map<String, String> props,
    boolean requireFullConfig) {
  final List<String> transformAliases = (List<String>) ConfigDef.parseType(
      TRANSFORMS_CONFIG, props.get(TRANSFORMS_CONFIG), Type.LIST);
  if (transformAliases == null || transformAliases.isEmpty()) {
    return baseConfigDef;
  }
  final ConfigDef newDef = new ConfigDef(baseConfigDef);
  for (String alias : new LinkedHashSet<>(transformAliases)) {
    final String prefix = TRANSFORMS_CONFIG + "." + alias + ".";
    final String group = TRANSFORMS_GROUP + ": " + alias;
    int orderInGroup = 0;
    final String transformationTypeConfig = prefix + "type";
    final ConfigDef.Validator typeValidator = new ConfigDef.Validator() {
      @Override
      public void ensureValid(String name, Object value) {
        getConfigDefFromTransformation(transformationTypeConfig, (Class) value);
      }
    };
    newDef.define(transformationTypeConfig, Type.CLASS, ConfigDef.NO_DEFAULT_VALUE,
        typeValidator, Importance.HIGH,
        "Class for the '" + alias + "' transformation.", group, orderInGroup++,
        Width.LONG, "Transformation type for " + alias,
        Collections.<String>emptyList(), new TransformationClassRecommender());
    final ConfigDef transformationConfigDef;
    try {
      final String className = props.get(transformationTypeConfig);
      final Class<?> cls = (Class<?>) ConfigDef.parseType(
          transformationTypeConfig, className, Type.CLASS);
      transformationConfigDef = getConfigDefFromTransformation(
          transformationTypeConfig, cls);
    } catch (ConfigException e) {
      if (requireFullConfig) {
        throw e;
      } else {
        continue;
      }
    }
    newDef.embed(prefix, group, orderInGroup, transformationConfigDef);
  }
  return newDef;
}
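Wrapping transformAliases in a LinkedHashSet is what guarantees each alias is defined only once while the generated config groups keep the order the user declared in the transforms property. A small illustrative sketch, independent of the Connect API, with made-up alias values.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class AliasDedupSketch {
  public static void main(String[] args) {
    // A value a user might put in the "transforms" property.
    List<String> transformAliases = Arrays.asList("maskField", "route", "maskField");
    // The LinkedHashSet drops the repeated alias but keeps declared order,
    // so each transform is defined once and its config group appears in
    // chain order.
    Set<String> uniqueAliases = new LinkedHashSet<>(transformAliases);
    for (String alias : uniqueAliases) {
      System.out.println("transforms." + alias + ".type");
    }
  }
}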
use of java.util.LinkedHashSet in project hive by apache.
the class LineageLogger method createSourceVertices.
/**
* Convert a list of columns to a set of vertices.
* Use cached vertices if possible.
*/
private Set<Vertex> createSourceVertices(Map<String, Vertex> vertexCache,
    Collection<BaseColumnInfo> baseCols) {
  Set<Vertex> sources = new LinkedHashSet<Vertex>();
  if (baseCols != null && !baseCols.isEmpty()) {
    for (BaseColumnInfo col : baseCols) {
      Table table = col.getTabAlias().getTable();
      if (table.isTemporary()) {
        // Ignore temporary tables
        continue;
      }
      Vertex.Type type = Vertex.Type.TABLE;
      String tableName = table.getDbName() + "." + table.getTableName();
      FieldSchema fieldSchema = col.getColumn();
      String label = tableName;
      if (fieldSchema != null) {
        type = Vertex.Type.COLUMN;
        label = tableName + "." + fieldSchema.getName();
      }
      sources.add(getOrCreateVertex(vertexCache, label, type));
    }
  }
  return sources;
}
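The LinkedHashSet keeps the source vertices in the order the base columns are walked, and getOrCreateVertex (not shown) reuses cached vertices so a column referenced twice collapses to a single entry. A simplified sketch of that cache-plus-ordered-set pattern, with a stand-in Vertex class and a hypothetical getOrCreateVertex that may differ from the real helper.

import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

public class VertexCacheSketch {
  // Minimal stand-in for the Vertex type used above.
  static class Vertex {
    final String label;
    Vertex(String label) { this.label = label; }
    @Override public String toString() { return label; }
  }

  // Hypothetical equivalent of getOrCreateVertex: reuse a cached vertex
  // for a label, creating it only on first sight.
  static Vertex getOrCreateVertex(Map<String, Vertex> cache, String label) {
    return cache.computeIfAbsent(label, Vertex::new);
  }

  public static void main(String[] args) {
    Map<String, Vertex> vertexCache = new HashMap<>();
    Set<Vertex> sources = new LinkedHashSet<>();
    for (String label : new String[] {"db.t1.c1", "db.t1.c2", "db.t1.c1"}) {
      // The cache guarantees one Vertex instance per label, so the
      // LinkedHashSet deduplicates repeats and iteration order follows
      // first appearance.
      sources.add(getOrCreateVertex(vertexCache, label));
    }
    System.out.println(sources);   // [db.t1.c1, db.t1.c2]
  }
}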
use of java.util.LinkedHashSet in project hive by apache.
the class CorrelationOptimizer method findPossibleAutoConvertedJoinOperators.
private void findPossibleAutoConvertedJoinOperators() throws SemanticException {
  // based on hive.auto.convert.join.noconditionaltask.size.
  for (JoinOperator joinOp : pCtx.getJoinOps()) {
    boolean isAbleToGuess = true;
    boolean mayConvert = false;
    // Get total size and individual alias's size
    long aliasTotalKnownInputSize = 0;
    Map<String, Long> aliasToSize = new HashMap<String, Long>();
    Map<Integer, Set<String>> posToAliases = new HashMap<Integer, Set<String>>();
    for (int pos = 0; pos < joinOp.getNumParent(); pos++) {
      Operator<? extends OperatorDesc> op = joinOp.getParentOperators().get(pos);
      Set<TableScanOperator> topOps = CorrelationUtilities.findTableScanOperators(op);
      if (topOps.isEmpty()) {
        isAbleToGuess = false;
        break;
      }
      Set<String> aliases = new LinkedHashSet<String>();
      for (TableScanOperator tsop : topOps) {
        Table table = tsop.getConf().getTableMetadata();
        if (table == null) {
          // table should not be null.
          throw new SemanticException("The table of " + tsop.getName() + " "
              + tsop.getIdentifier() + " is null, which is not expected.");
        }
        String alias = tsop.getConf().getAlias();
        aliases.add(alias);
        Path p = table.getPath();
        ContentSummary resultCs = null;
        try {
          FileSystem fs = table.getPath().getFileSystem(pCtx.getConf());
          resultCs = fs.getContentSummary(p);
        } catch (IOException e) {
          LOG.warn("Encountered an error while querying the content summary of table "
              + table.getCompleteName() + " from FileSystem. "
              + "Cannot guess if CommonJoinOperator will optimize "
              + joinOp.getName() + " " + joinOp.getIdentifier());
        }
        if (resultCs == null) {
          isAbleToGuess = false;
          break;
        }
        long size = resultCs.getLength();
        aliasTotalKnownInputSize += size;
        Long es = aliasToSize.get(alias);
        if (es == null) {
          es = new Long(0);
        }
        es += size;
        aliasToSize.put(alias, es);
      }
      posToAliases.put(pos, aliases);
    }
    if (!isAbleToGuess) {
      LOG.info("Cannot guess if CommonJoinOperator will optimize "
          + joinOp.getName() + " " + joinOp.getIdentifier());
      continue;
    }
    JoinDesc joinDesc = joinOp.getConf();
    Byte[] order = joinDesc.getTagOrder();
    int numAliases = order.length;
    Set<Integer> bigTableCandidates =
        MapJoinProcessor.getBigTableCandidates(joinDesc.getConds());
    if (bigTableCandidates.isEmpty()) {
      continue;
    }
    long ThresholdOfSmallTblSizeSum = HiveConf.getLongVar(pCtx.getConf(),
        HiveConf.ConfVars.HIVESMALLTABLESFILESIZE);
    for (int i = 0; i < numAliases; i++) {
      // this table cannot be big table
      if (!bigTableCandidates.contains(i)) {
        continue;
      }
      Set<String> aliases = posToAliases.get(i);
      long aliasKnownSize = Utilities.sumOf(aliasToSize, aliases);
      if (!CommonJoinTaskDispatcher.cannotConvert(aliasKnownSize,
          aliasTotalKnownInputSize, ThresholdOfSmallTblSizeSum)) {
        mayConvert = true;
      }
    }
    if (mayConvert) {
      LOG.info(joinOp.getName() + " " + joinOp.getIdentifier()
          + " may be converted to MapJoin by CommonJoinResolver");
      skipedJoinOperators.add(joinOp);
    }
  }
}
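For each join position the LinkedHashSet of aliases is summed against aliasToSize and compared with the small-table threshold. A rough, self-contained sketch of that size heuristic; the sumOf helper, the inlined threshold comparison, and all numbers are stand-ins and do not reproduce the real Utilities.sumOf or CommonJoinTaskDispatcher.cannotConvert logic.

import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

public class MapJoinSizeGuessSketch {
  // Hypothetical stand-in for Utilities.sumOf: total known size of the given aliases.
  static long sumOf(Map<String, Long> aliasToSize, Set<String> aliases) {
    long sum = 0;
    for (String alias : aliases) {
      Long size = aliasToSize.get(alias);
      if (size != null) {
        sum += size;
      }
    }
    return sum;
  }

  public static void main(String[] args) {
    Map<String, Long> aliasToSize = new HashMap<>();
    aliasToSize.put("a", 10_000_000L);
    aliasToSize.put("b", 1_000L);
    long totalKnownInputSize = 10_001_000L;
    long smallTableThreshold = 25_000_000L;   // stand-in for the threshold used above

    // Aliases feeding the candidate big-table position, in the order the
    // table scans were discovered.
    Set<String> aliasesAtBigTablePosition = new LinkedHashSet<>();
    aliasesAtBigTablePosition.add("a");

    long bigSideSize = sumOf(aliasToSize, aliasesAtBigTablePosition);
    // If everything other than the candidate big table fits under the
    // threshold, the join may be auto-converted to a map join.
    boolean mayConvert = (totalKnownInputSize - bigSideSize) <= smallTableThreshold;
    System.out.println("mayConvert = " + mayConvert);
  }
}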