Use of org.apache.hadoop.yarn.api.records.LocalResource in project ignite by apache.
The class IgniteYarnUtils, method setupFile.
/**
 * @param file Path.
 * @param fs File system.
 * @param type Local resource type.
 * @return Local resource.
 * @throws Exception If failed.
 */
public static LocalResource setupFile(Path file, FileSystem fs, LocalResourceType type) throws Exception {
    LocalResource resource = Records.newRecord(LocalResource.class);
    file = fs.makeQualified(file);
    FileStatus stat = fs.getFileStatus(file);
    resource.setResource(ConverterUtils.getYarnUrlFromPath(file));
    resource.setSize(stat.getLen());
    resource.setTimestamp(stat.getModificationTime());
    resource.setType(type);
    resource.setVisibility(LocalResourceVisibility.APPLICATION);
    return resource;
}
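A minimal usage sketch, not part of the Ignite snippet above (the igniteJarPath variable, the Hadoop Configuration conf, and the "ignite.jar" link name are assumptions): the returned LocalResource is typically registered under a link name in a ContainerLaunchContext before the container is launched.
// Sketch only: igniteJarPath, conf and the "ignite.jar" link name are assumed by this example.
FileSystem fs = FileSystem.get(conf);
LocalResource jarRes = IgniteYarnUtils.setupFile(igniteJarPath, fs, LocalResourceType.FILE);
Map<String, LocalResource> localResources = new HashMap<>();
// The map key is the link name the file gets in the container's working directory.
localResources.put("ignite.jar", jarRes);
ContainerLaunchContext amCtx = Records.newRecord(ContainerLaunchContext.class);
amCtx.setLocalResources(localResources);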
Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.
The class TestTezTask, method testExistingSessionGetsStorageHandlerResources.
@Test
public void testExistingSessionGetsStorageHandlerResources() throws Exception {
    final String[] inputOutputJars = new String[] { "file:///tmp/foo.jar" };
    LocalResource res = createResource(inputOutputJars[0]);
    final List<LocalResource> resources = Collections.singletonList(res);
    when(utils.localizeTempFiles(anyString(), any(Configuration.class), eq(inputOutputJars),
        any(String[].class))).thenReturn(resources);
    when(sessionState.isOpen()).thenReturn(true);
    when(sessionState.isOpening()).thenReturn(false);
    task.ensureSessionHasResources(sessionState, inputOutputJars);
    // TODO: ideally we should have a test for session itself.
    verify(sessionState).ensureLocalResources(any(Configuration.class), eq(inputOutputJars));
}
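The createResource(...) helper is not shown in this snippet. A plausible sketch of what such a test helper does (a hypothetical reconstruction, not the actual TestTezTask code) is to hand back a LocalResource whose URL points at the given path; only the resource URL matters for this mock-based test.
// Hypothetical helper sketch; built from the same calls used in the Ignite example above.
private LocalResource createResource(String url) {
    LocalResource res = Records.newRecord(LocalResource.class);
    res.setResource(ConverterUtils.getYarnUrlFromPath(new Path(url)));
    return res;
}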
Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.
The class GenericUDTFGetSplits, method getSplits.
public InputSplit[] getSplits(JobConf job, int numSplits, TezWork work, Schema schema,
    ApplicationId applicationId) throws IOException {
  DAG dag = DAG.create(work.getName());
  dag.setCredentials(job.getCredentials());
  DagUtils utils = DagUtils.getInstance();
  Context ctx = new Context(job);
  MapWork mapWork = (MapWork) work.getAllWork().get(0);
  // A bunch of things get set up in the context based on conf, but we need only the MR tmp
  // directory for the following method.
  JobConf wxConf = utils.initializeVertexConf(job, ctx, mapWork);
  // TODO: should we also whitelist input formats here? from mapred.input.format.class
  Path scratchDir = utils.createTezDir(ctx.getMRScratchDir(), job);
  FileSystem fs = scratchDir.getFileSystem(job);
  try {
    LocalResource appJarLr = createJarLocalResource(utils.getExecJarPathLocal(ctx.getConf()), utils, job);
    LlapCoordinator coordinator = LlapCoordinator.getInstance();
    if (coordinator == null) {
      throw new IOException("LLAP coordinator is not initialized; must be running in HS2 with "
          + ConfVars.LLAP_HS2_ENABLE_COORDINATOR.varname + " enabled");
    }
    // Update the queryId to use the generated applicationId. See comment below about
    // why this is done.
    HiveConf.setVar(wxConf, HiveConf.ConfVars.HIVEQUERYID, applicationId.toString());
    Vertex wx = utils.createVertex(wxConf, mapWork, scratchDir, fs, ctx, false, work,
        work.getVertexType(mapWork), DagUtils.createTezLrMap(appJarLr, null));
    String vertexName = wx.getName();
    dag.addVertex(wx);
    utils.addCredentials(mapWork, dag);
    // We have the DAG; now proceed to get the splits.
    Preconditions.checkState(HiveConf.getBoolVar(wxConf, ConfVars.HIVE_TEZ_GENERATE_CONSISTENT_SPLITS));
    Preconditions.checkState(HiveConf.getBoolVar(wxConf, ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS));
    HiveSplitGenerator splitGenerator = new HiveSplitGenerator(wxConf, mapWork);
    List<Event> eventList = splitGenerator.initialize();
    InputSplit[] result = new InputSplit[eventList.size() - 1];
    InputConfigureVertexTasksEvent configureEvent = (InputConfigureVertexTasksEvent) eventList.get(0);
    List<TaskLocationHint> hints = configureEvent.getLocationHint().getTaskLocationHints();
    Preconditions.checkState(hints.size() == eventList.size() - 1);
    if (LOG.isDebugEnabled()) {
      LOG.debug("NumEvents=" + eventList.size() + ", NumSplits=" + result.length);
    }
    // This assumes the LLAP cluster owner is always the HS2 user.
    String llapUser = UserGroupInformation.getLoginUser().getShortUserName();
    String queryUser = null;
    byte[] tokenBytes = null;
    LlapSigner signer = null;
    if (UserGroupInformation.isSecurityEnabled()) {
      signer = coordinator.getLlapSigner(job);
      // 1. Generate the token for the query user (applies to all splits).
      queryUser = SessionState.getUserFromAuthenticator();
      if (queryUser == null) {
        queryUser = UserGroupInformation.getCurrentUser().getUserName();
        LOG.warn("Cannot determine the session user; using " + queryUser + " instead");
      }
      LlapTokenLocalClient tokenClient = coordinator.getLocalTokenClient(job, llapUser);
      // We put the query user, not the LLAP user, into the message and token.
      Token<LlapTokenIdentifier> token = tokenClient.createToken(applicationId.toString(), queryUser, true);
      LOG.info("Created the token for remote user: {}", token);
      // bos/dos are output-stream fields of the enclosing class (not shown in this snippet).
      bos.reset();
      token.write(dos);
      tokenBytes = bos.toByteArray();
    } else {
      queryUser = UserGroupInformation.getCurrentUser().getUserName();
    }
    // Generate the umbilical token (applies to all splits).
    Token<JobTokenIdentifier> umbilicalToken = JobTokenCreator.createJobToken(applicationId);
    LOG.info("Number of splits: " + (eventList.size() - 1));
    SignedMessage signedSvs = null;
    for (int i = 0; i < eventList.size() - 1; i++) {
      TaskSpec taskSpec = new TaskSpecBuilder().constructTaskSpec(dag, vertexName,
          eventList.size() - 1, applicationId, i);
      // 2. Generate the vertex/submit information for all events.
      if (i == 0) {
        // The queryId could either be picked up from the current request being processed, or
        // generated. The current request isn't exactly correct since the query is 'done' once we
        // return the results. Generating a new one has the added benefit of working once this
        // is moved out of a UDTF into a proper API.
        // Setting this to the generated AppId, which is unique.
        // Despite the differences in TaskSpec, the vertex spec should be the same.
        signedSvs = createSignedVertexSpec(signer, taskSpec, applicationId, queryUser, applicationId.toString());
      }
      SubmitWorkInfo submitWorkInfo = new SubmitWorkInfo(applicationId, System.currentTimeMillis(),
          taskSpec.getVertexParallelism(), signedSvs.message, signedSvs.signature, umbilicalToken);
      byte[] submitWorkBytes = SubmitWorkInfo.toBytes(submitWorkInfo);
      // 3. Generate the input event.
      SignedMessage eventBytes = makeEventBytes(wx, vertexName, eventList.get(i + 1), signer);
      // 4. Make location hints.
      SplitLocationInfo[] locations = makeLocationHints(hints.get(i));
      result[i] = new LlapInputSplit(i, submitWorkBytes, eventBytes.message, eventBytes.signature,
          locations, schema, llapUser, tokenBytes);
    }
    return result;
  } catch (Exception e) {
    throw new IOException(e);
  }
}
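A minimal calling sketch (the hiveConf, tezWork and schema variables are assumptions supplied by the surrounding UDTF machinery; in the real flow the ApplicationId comes from the LLAP coordinator rather than being built by hand):
// Sketch only: hiveConf, tezWork and schema are assumed to exist in the caller.
JobConf job = new JobConf(hiveConf);
// numSplits is effectively advisory here; the split count is driven by the generated events.
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
InputSplit[] splits = getSplits(job, 0, tezWork, schema, appId);
LOG.info("Generated " + splits.length + " LLAP input splits");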
Use of org.apache.hadoop.yarn.api.records.LocalResource in project hive by apache.
The class DagUtils, method addHdfsResource.
private void addHdfsResource(Configuration conf, List<LocalResource> tmpResources,
    LocalResourceType type, String[] files) throws IOException {
  for (String file : files) {
    if (StringUtils.isNotBlank(file)) {
      Path dest = new Path(file);
      FileSystem destFS = dest.getFileSystem(conf);
      LocalResource localResource = createLocalResource(destFS, dest, type, LocalResourceVisibility.PRIVATE);
      tmpResources.add(localResource);
    }
  }
}
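The createLocalResource(...) helper is not part of this snippet. A minimal sketch of what such a helper typically does (a hypothetical reconstruction that mirrors the Ignite setupFile example above, not the actual DagUtils code):
// Hypothetical helper sketch; uses only the LocalResource.newInstance signature shown below.
private LocalResource createLocalResource(FileSystem fs, Path file, LocalResourceType type,
    LocalResourceVisibility visibility) throws IOException {
  // Stat the file so the NodeManager can verify size and timestamp when localizing it.
  FileStatus stat = fs.getFileStatus(file);
  return LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(file), type, visibility,
      stat.getLen(), stat.getModificationTime());
}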
Use of org.apache.hadoop.yarn.api.records.LocalResource in project hadoop by apache.
The class RMWebServices, method createContainerLaunchContext.
/**
 * Create the ContainerLaunchContext required for the
 * ApplicationSubmissionContext. This function takes the user information and
 * generates the ByteBuffer structures required by the ContainerLaunchContext.
 *
 * @param newApp
 *          the information provided by the user
 * @return created context
 * @throws BadRequestException
 * @throws IOException
 */
protected ContainerLaunchContext createContainerLaunchContext(ApplicationSubmissionContextInfo newApp)
    throws BadRequestException, IOException {
  // create container launch context
  HashMap<String, ByteBuffer> hmap = new HashMap<String, ByteBuffer>();
  for (Map.Entry<String, String> entry : newApp.getContainerLaunchContextInfo()
      .getAuxillaryServiceData().entrySet()) {
    if (!entry.getValue().isEmpty()) {
      Base64 decoder = new Base64(0, null, true);
      byte[] data = decoder.decode(entry.getValue());
      hmap.put(entry.getKey(), ByteBuffer.wrap(data));
    }
  }
  HashMap<String, LocalResource> hlr = new HashMap<String, LocalResource>();
  for (Map.Entry<String, LocalResourceInfo> entry : newApp.getContainerLaunchContextInfo()
      .getResources().entrySet()) {
    LocalResourceInfo l = entry.getValue();
    LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()), l.getType(),
        l.getVisibility(), l.getSize(), l.getTimestamp());
    hlr.put(entry.getKey(), lr);
  }
  DataOutputBuffer out = new DataOutputBuffer();
  Credentials cs = createCredentials(newApp.getContainerLaunchContextInfo().getCredentials());
  cs.writeTokenStorageToStream(out);
  ByteBuffer tokens = ByteBuffer.wrap(out.getData());
  ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr,
      newApp.getContainerLaunchContextInfo().getEnvironment(),
      newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens,
      newApp.getContainerLaunchContextInfo().getAcls());
  return ctx;
}
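A minimal sketch of where this fits (an assumed caller, not the full RMWebServices submission flow): the returned context becomes the AM container spec of the ApplicationSubmissionContext that is submitted to the ResourceManager.
// Sketch only: appId is assumed to come from a prior new-application request.
ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
appContext.setApplicationId(appId);
appContext.setAMContainerSpec(createContainerLaunchContext(newApp));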