Use of org.apache.hadoop.hive.ql.exec.spark.status.impl.RemoteSparkJobStatus in project hive by apache.
From the class RemoteHiveSparkClient, method submit.
private SparkJobRef submit(final DriverContext driverContext, final SparkWork sparkWork) throws Exception {
  final Context ctx = driverContext.getCtx();
  final HiveConf hiveConf = (HiveConf) ctx.getConf();
  refreshLocalResources(sparkWork, hiveConf);
  final JobConf jobConf = new JobConf(hiveConf);
  // Update the credential provider location in the jobConf.
  HiveConfUtil.updateJobCredentialProviders(jobConf);
  // Create a temporary scratch dir.
  final Path emptyScratchDir = ctx.getMRTmpPath();
  FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
  fs.mkdirs(emptyScratchDir);
  // Make sure NullScanFileSystem can be loaded - HIVE-18442.
  jobConf.set("fs." + NullScanFileSystem.getBaseScheme() + ".impl", NullScanFileSystem.class.getCanonicalName());
  // Kryo-serialize the job conf, scratch dir, and work plan so they can be
  // shipped to the remote Spark client.
  byte[] jobConfBytes = KryoSerializer.serializeJobConf(jobConf);
  byte[] scratchDirBytes = KryoSerializer.serialize(emptyScratchDir);
  byte[] sparkWorkBytes = KryoSerializer.serialize(sparkWork);
  JobStatusJob job = new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes);
  // Bail out before submission if the operation has already been cancelled.
  if (driverContext.isShutdown()) {
    throw new HiveException("Operation is cancelled.");
  }
  JobHandle<Serializable> jobHandle = remoteClient.submit(job);
  RemoteSparkJobStatus sparkJobStatus = new RemoteSparkJobStatus(remoteClient, jobHandle, sparkClientTimtout);
  return new RemoteSparkJobRef(hiveConf, jobHandle, sparkJobStatus);
}
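For context, the returned reference is what the calling task drives to completion. A minimal caller sketch, assuming the SparkJobRef interface exposes monitorJob() and cancelJob() as in the Hive codebase (error handling simplified):

// Hypothetical caller sketch; not part of the snippet above.
SparkJobRef jobRef = submit(driverContext, sparkWork);
int rc = jobRef.monitorJob(); // polls RemoteSparkJobStatus until a terminal state
if (rc != 0) {
  jobRef.cancelJob();         // best-effort cancel on failure or interruption
}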
Use of org.apache.hadoop.hive.ql.exec.spark.status.impl.RemoteSparkJobStatus in project hive by apache.
From the class TestSparkTask, method testRemoteSparkCancel.
@Test
public void testRemoteSparkCancel() {
  // Simulate a remote job that reports CANCELLED while still marked active.
  RemoteSparkJobStatus jobSts = mock(RemoteSparkJobStatus.class);
  when(jobSts.getRemoteJobState()).thenReturn(State.CANCELLED);
  when(jobSts.isRemoteActive()).thenReturn(true);
  HiveConf hiveConf = new HiveConf();
  SessionState.start(hiveConf);
  RemoteSparkJobMonitor remoteSparkJobMonitor = new RemoteSparkJobMonitor(hiveConf, jobSts);
  // The monitor should exit with return code 3, signalling cancellation.
  Assert.assertEquals(3, remoteSparkJobMonitor.startMonitor());
}
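A companion check for the failure path can be written the same way; a minimal sketch, assuming JobHandle.State.FAILED takes the monitor down the same error branch (return code 3) as cancellation:

@Test
public void testRemoteSparkFailure() {
  // Hypothetical companion test, mirroring testRemoteSparkCancel above.
  RemoteSparkJobStatus jobSts = mock(RemoteSparkJobStatus.class);
  when(jobSts.getRemoteJobState()).thenReturn(State.FAILED); // assumed terminal failure state
  when(jobSts.isRemoteActive()).thenReturn(true);
  HiveConf hiveConf = new HiveConf();
  SessionState.start(hiveConf);
  RemoteSparkJobMonitor remoteSparkJobMonitor = new RemoteSparkJobMonitor(hiveConf, jobSts);
  Assert.assertEquals(3, remoteSparkJobMonitor.startMonitor());
}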
Use of org.apache.hadoop.hive.ql.exec.spark.status.impl.RemoteSparkJobStatus in project hive by apache.
From the class RemoteHiveSparkClient, method submit (a newer overload taking a TaskQueue and Context in place of a DriverContext).
private SparkJobRef submit(TaskQueue taskQueue, Context context, SparkWork sparkWork) throws Exception {
  final HiveConf hiveConf = (HiveConf) context.getConf();
  refreshLocalResources(sparkWork, hiveConf);
  final JobConf jobConf = new JobConf(hiveConf);
  // Update the credential provider location in the jobConf.
  HiveConfUtil.updateJobCredentialProviders(jobConf);
  // Create a temporary scratch dir.
  final Path emptyScratchDir = context.getMRTmpPath();
  FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
  fs.mkdirs(emptyScratchDir);
  // Make sure NullScanFileSystem can be loaded - HIVE-18442.
  jobConf.set("fs." + NullScanFileSystem.getBaseScheme() + ".impl", NullScanFileSystem.class.getCanonicalName());
  // Serialize the same three artifacts as the DriverContext overload above.
  byte[] jobConfBytes = KryoSerializer.serializeJobConf(jobConf);
  byte[] scratchDirBytes = KryoSerializer.serialize(emptyScratchDir);
  byte[] sparkWorkBytes = KryoSerializer.serialize(sparkWork);
  JobStatusJob job = new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes);
  // Here the shutdown check consults the TaskQueue rather than a DriverContext.
  if (taskQueue.isShutdown()) {
    throw new HiveException("Operation is cancelled.");
  }
  JobHandle<Serializable> jobHandle = remoteClient.submit(job);
  RemoteSparkJobStatus sparkJobStatus = new RemoteSparkJobStatus(remoteClient, jobHandle, sparkClientTimtout);
  return new RemoteSparkJobRef(hiveConf, jobHandle, sparkJobStatus);
}
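On the remote side, the shipped bytes are reconstructed before execution; a minimal sketch of the reverse step, assuming KryoSerializer's matching deserialize helpers (variable names illustrative):

// Hypothetical remote-side sketch: JobStatusJob's counterpart to the
// serialization calls above.
JobConf remoteJobConf = KryoSerializer.deserializeJobConf(jobConfBytes);
Path remoteScratchDir = KryoSerializer.deserialize(scratchDirBytes, Path.class);
SparkWork remoteSparkWork = KryoSerializer.deserialize(sparkWorkBytes, SparkWork.class);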