Use of org.apache.hadoop.ozone.client.OzoneClientException in project ozone by apache.
The class OzoneAddress, method createClient.
public OzoneClient createClient(MutableConfigurationSource conf)
    throws IOException, OzoneClientException {
  OzoneClient client;
  String scheme = ozoneURI.getScheme();
  if (scheme == null || scheme.isEmpty()) {
    scheme = OZONE_RPC_SCHEME;
  }
  if (scheme.equals(OZONE_HTTP_SCHEME)) {
    throw new UnsupportedOperationException(
        "REST scheme is not supported anymore. Please use the AWS S3 protocol "
            + "if you need a REST interface.");
  } else if (!scheme.equals(OZONE_RPC_SCHEME)) {
    throw new OzoneClientException(
        "Invalid URI, unknown protocol scheme: " + scheme + ". Use "
            + OZONE_RPC_SCHEME + ":// as the scheme");
  }
  if (ozoneURI.getHost() != null
      && !ozoneURI.getAuthority().equals(EMPTY_HOST)) {
    if (OmUtils.isOmHAServiceId(conf, ozoneURI.getHost())) {
      // The host is a logical (HA) OzoneManager service ID; a port is invalid.
      if (ozoneURI.getPort() != -1) {
        throw new OzoneClientException(
            "Port " + ozoneURI.getPort() + " specified in URI but host '"
                + ozoneURI.getHost() + "' is a logical (HA) OzoneManager "
                + "and does not use port information.");
      }
      client = createRpcClient(conf);
    } else if (ozoneURI.getPort() == -1) {
      // Host given without a port: use the OM RPC port from configuration.
      client = createRpcClientFromHostPort(ozoneURI.getHost(),
          OmUtils.getOmRpcPort(conf), conf);
    } else {
      client = createRpcClientFromHostPort(ozoneURI.getHost(),
          ozoneURI.getPort(), conf);
    }
  } else {
    // No host specified: resolve the OzoneManager from configuration.
    Collection<String> omServiceIds =
        conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY);
    if (omServiceIds.size() > 1) {
      throw new OzoneClientException("Service ID or host name must not"
          + " be omitted when multiple ozone.om.service.ids are defined.");
    } else if (omServiceIds.size() == 1) {
      client = createRpcClientFromServiceId(omServiceIds.iterator().next(),
          conf);
    } else {
      client = createRpcClient(conf);
    }
  }
  return client;
}
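For context, a minimal call-site sketch; the o3:// address string, the service ID, and the bare OzoneConfiguration below are illustrative assumptions, not part of the snippet above:

// Hedged usage sketch: resolve an OzoneClient from a shell-style address.
// "om-service-id", "vol1" and "bucket1" are hypothetical names.
OzoneConfiguration conf = new OzoneConfiguration();
OzoneAddress address = new OzoneAddress("o3://om-service-id/vol1/bucket1");
try (OzoneClient client = address.createClient(conf)) {
  // The client is ready for object store operations, e.g. listing volumes.
  client.getObjectStore().listVolumes("")
      .forEachRemaining(v -> System.out.println(v.getName()));
}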
Use of org.apache.hadoop.ozone.client.OzoneClientException in project ozone by apache.
The class OMAdmin, method createOmClient.
public OzoneManagerProtocolClientSideTranslatorPB createOmClient(
    String omServiceID, String omHost, boolean forceHA) throws Exception {
  OzoneConfiguration conf = parent.getOzoneConf();
  if (omHost != null && !omHost.isEmpty()) {
    // An explicit host wins over any service ID.
    omServiceID = null;
    conf.set(OZONE_OM_ADDRESS_KEY, omHost);
  } else if (omServiceID == null || omServiceID.isEmpty()) {
    omServiceID = getTheOnlyConfiguredOmServiceIdOrThrow();
  }
  RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
      ProtobufRpcEngine.class);
  String clientId = ClientId.randomId().toString();
  if (!forceHA || OmUtils.isOmHAServiceId(conf, omServiceID)) {
    OmTransport omTransport = new Hadoop3OmTransportFactory()
        .createOmTransport(conf, parent.getUser(), omServiceID);
    return new OzoneManagerProtocolClientSideTranslatorPB(omTransport,
        clientId);
  } else {
    throw new OzoneClientException("This command works only on an OzoneManager"
        + " HA cluster. The specified service ID does not match any"
        + " " + OZONE_OM_SERVICE_IDS_KEY + " defined in the"
        + " configuration. Configured " + OZONE_OM_SERVICE_IDS_KEY + " are "
        + conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY) + "\n");
  }
}
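A hedged call-site sketch follows; the service ID literal is an assumption, and the getServiceList() call is merely an illustrative use of the returned translator:

// Hypothetical invocation from an admin subcommand.
OzoneManagerProtocolClientSideTranslatorPB omClient =
    createOmClient("om-service-id", null, true);
try {
  // Illustrative follow-up call: print the services known to the OM.
  System.out.println(omClient.getServiceList());
} finally {
  omClient.close();
}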
Use of org.apache.hadoop.ozone.client.OzoneClientException in project ozone by apache.
The class GetKeyHandler, method execute.
@Override
protected void execute(OzoneClient client, OzoneAddress address)
    throws IOException, OzoneClientException {
  String volumeName = address.getVolumeName();
  String bucketName = address.getBucketName();
  String keyName = address.getKeyName();
  File dataFile = new File(fileName);
  // If the target is an existing directory, download into it under the key name.
  if (dataFile.exists() && dataFile.isDirectory()) {
    dataFile = new File(fileName, keyName);
  }
  if (dataFile.exists() && !force) {
    throw new OzoneClientException(dataFile.getPath() + " exists."
        + " Download would overwrite an existing file. Aborting.");
  }
  int chunkSize = (int) getConf().getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
      OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
  OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
  OzoneBucket bucket = vol.getBucket(bucketName);
  try (InputStream input = bucket.readKey(keyName);
      OutputStream output = new FileOutputStream(dataFile)) {
    IOUtils.copyBytes(input, output, chunkSize);
  }
  // In verbose mode, report an MD5 digest of the downloaded file.
  if (isVerbose() && !"/dev/null".equals(dataFile.getAbsolutePath())) {
    try (InputStream stream = new FileInputStream(dataFile)) {
      String hash = DigestUtils.md5Hex(stream);
      out().printf("Downloaded file hash : %s%n", hash);
    }
  }
}
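This handler backs the ozone sh key get shell command; the same read path can also be driven programmatically. A minimal sketch, assuming hypothetical volume, bucket, and key names, a local target path, and a fixed 4 KB copy buffer:

// Hedged sketch of the same download path via the client API; the names
// "vol1", "bucket1", "key1", and "/tmp/key1" are illustrative assumptions.
OzoneVolume vol = client.getObjectStore().getVolume("vol1");
OzoneBucket bucket = vol.getBucket("bucket1");
try (InputStream input = bucket.readKey("key1");
    OutputStream output = new FileOutputStream(new File("/tmp/key1"))) {
  IOUtils.copyBytes(input, output, 4096);
}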