Example usage of org.apache.hadoop.hdds.conf.ConfigurationSource in the Apache Ozone project.
From class TestKeyValueHandler, method testDeleteContainer:
/**
 * Creates a container through the handler, then deletes it, verifying that
 * exactly one incremental container report (ICR) is sent for each operation
 * and that the container is removed from the {@link ContainerSet}.
 */
@Test
public void testDeleteContainer() throws IOException {
  final String testDir = GenericTestUtils.getTempPath(
      TestKeyValueHandler.class.getSimpleName() + "-" + UUID.randomUUID().toString());
  try {
    final long containerID = 1L;
    final ConfigurationSource conf = new OzoneConfiguration();
    final ContainerSet containerSet = new ContainerSet();

    // Single mocked volume rooted in the temp directory.
    final VolumeSet volumeSet = Mockito.mock(VolumeSet.class);
    final HddsVolume volume = new HddsVolume.Builder(testDir).conf(conf).build();
    Mockito.when(volumeSet.getVolumesList())
        .thenReturn(Collections.singletonList(volume));

    final ContainerMetrics metrics = new ContainerMetrics(new int[] {2});

    // Counts incremental container reports emitted by the handler.
    final AtomicInteger icrReceived = new AtomicInteger(0);
    final KeyValueHandler kvHandler = new KeyValueHandler(conf,
        UUID.randomUUID().toString(), containerSet, volumeSet, metrics,
        c -> icrReceived.incrementAndGet());
    kvHandler.setClusterID(UUID.randomUUID().toString());

    final ContainerCommandRequestProto createContainer =
        ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.CreateContainer)
            .setDatanodeUuid(UUID.randomUUID().toString())
            .setCreateContainer(ContainerProtos.CreateContainerRequestProto
                .newBuilder()
                .setContainerType(ContainerType.KeyValueContainer)
                .build())
            .setContainerID(containerID)
            .setPipelineID(UUID.randomUUID().toString())
            .build();

    // Create: one ICR expected, container must now be present.
    kvHandler.handleCreateContainer(createContainer, null);
    Assert.assertEquals(1, icrReceived.get());
    Assert.assertNotNull(containerSet.getContainer(containerID));

    // Delete (force=true): a second ICR expected, container must be gone.
    kvHandler.deleteContainer(containerSet.getContainer(containerID), true);
    Assert.assertEquals(2, icrReceived.get());
    Assert.assertNull(containerSet.getContainer(containerID));
  } finally {
    FileUtils.deleteDirectory(new File(testDir));
  }
}
Example usage of org.apache.hadoop.hdds.conf.ConfigurationSource in the Apache Ozone project.
From class HttpServer2, method hasAdministratorAccess:
/**
* Does the user sending the HttpServletRequest has the administrator ACLs? If
* it isn't the case, response will be modified to send an error to the user.
*
* @param response used to send the error response if user does not have
* admin access.
* @return true if admin-authorized, false otherwise
* @throws IOException
*/
/**
 * Does the user sending the HttpServletRequest have the administrator ACLs?
 * If not, the response is modified to send an error to the user.
 *
 * @param servletContext context holding the configuration and admin ACLs.
 * @param request the request whose remote user is checked.
 * @param response used to send the error response if user does not have
 * admin access (403 Forbidden).
 * @return true if admin-authorized, false otherwise
 * @throws IOException if sending the error response fails
 */
public static boolean hasAdministratorAccess(ServletContext servletContext, HttpServletRequest request, HttpServletResponse response) throws IOException {
  ConfigurationSource conf = (ConfigurationSource) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
  // If there is no authorization, anybody has administrator access.
  if (!conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    return true;
  }
  String remoteUser = request.getRemoteUser();
  if (remoteUser == null) {
    response.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthenticated users are not " + "authorized to access this page.");
    return false;
  }
  if (servletContext.getAttribute(ADMINS_ACL) != null && !userHasAdministratorAccess(servletContext, remoteUser)) {
    // Bug fix: this branch handles an AUTHENTICATED user who is simply not in
    // the admin ACL; the previous message was a copy-paste of the
    // unauthenticated case above (the LOG.warn below already says
    // "unauthorized"). Name the user instead.
    response.sendError(HttpServletResponse.SC_FORBIDDEN, "User " + remoteUser + " is unauthorized to access this page.");
    LOG.warn("User {} is unauthorized to access the page {}.", remoteUser, request.getRequestURI());
    return false;
  }
  return true;
}
Example usage of org.apache.hadoop.hdds.conf.ConfigurationSource in the Apache Ozone project.
From class HttpServer2, method isInstrumentationAccessAllowed:
/**
 * Checks whether the user has privileges to access the instrumentation
 * servlets.
 * <p/>
 * If <code>hadoop.security.instrumentation.requires.admin</code> is set to
 * FALSE (the default) this always returns TRUE.
 * <p/>
 * If it is set to TRUE, the result is TRUE only when the current user is in
 * the admin ACLs; otherwise FALSE.
 *
 * @param servletContext the servlet context.
 * @param request the servlet request.
 * @param response the servlet response.
 * @return TRUE/FALSE based on the logic described above.
 */
public static boolean isInstrumentationAccessAllowed(ServletContext servletContext, HttpServletRequest request, HttpServletResponse response) throws IOException {
  ConfigurationSource conf = (ConfigurationSource) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
  boolean requiresAdmin = conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false);
  // Without the admin requirement anyone may access instrumentation.
  if (!requiresAdmin) {
    return true;
  }
  return hasAdministratorAccess(servletContext, request, response);
}
Example usage of org.apache.hadoop.hdds.conf.ConfigurationSource in the Apache Ozone project.
From class TestNodeStateManager, method setUp:
/**
 * Builds a fresh NodeStateManager before each test, backed by a minimal
 * ConfigurationSource stub (no values set), a mock event publisher, and a
 * mocked layout version manager pinned to the maximum layout version.
 */
@BeforeEach
public void setUp() {
  // Minimal stub: every configuration lookup behaves as "nothing configured".
  conf = new ConfigurationSource() {
    @Override
    public String get(String key) {
      return null;
    }

    @Override
    public Collection<String> getConfigKeys() {
      return null;
    }

    @Override
    public char[] getPassword(String key) throws IOException {
      return new char[0];
    }
  };
  eventPublisher = new MockEventPublisher();

  // Both metadata and software layout versions report the maximum so the
  // node state machine treats all nodes as fully up to date.
  LayoutVersionManager layoutManager = Mockito.mock(HDDSLayoutVersionManager.class);
  Mockito.when(layoutManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
  Mockito.when(layoutManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());

  nsm = new NodeStateManager(conf, eventPublisher, layoutManager);
}
Example usage of org.apache.hadoop.hdds.conf.ConfigurationSource in the Apache Ozone project.
From class BasicRootedOzoneFileSystem, method initialize:
/**
 * Initializes the OFS file system: validates the ofs:// URI, parses the
 * authority into an OM host (or HA service id) and optional port, and wires
 * up the client adapter, user name and working directory.
 *
 * @param name file system URI; authority is required ("host[:port]" or an
 *        OM service id).
 * @param conf Hadoop configuration to use.
 * @throws IOException if the Ozone endpoint URI cannot be built.
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  super.initialize(name, conf);
  setConf(conf);
  Preconditions.checkNotNull(name.getScheme(), "No scheme provided in %s", name);
  Preconditions.checkArgument(getScheme().equals(name.getScheme()), "Invalid scheme provided in %s", name);

  String authority = name.getAuthority();
  if (authority == null) {
    // ofs:/// is passed to the client. matcher will NPE if authority is null
    throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
  }

  // Authority must be "host" or "host:port"; anything with more colons
  // (or a non-numeric port) is rejected.
  String[] hostPort = authority.split(":");
  if (hostPort.length > 2) {
    throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
  }
  String omHostOrServiceId = hostPort[0];
  int omPort = -1;
  if (hostPort.length == 2) {
    try {
      omPort = Integer.parseInt(hostPort[1]);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
    }
  }

  try {
    uri = new URIBuilder().setScheme(OZONE_OFS_URI_SCHEME).setHost(authority).build();
    LOG.trace("Ozone URI for OFS initialization is " + uri);
    ConfigurationSource source = getConfSource();
    this.adapter = createAdapter(source, omHostOrServiceId, omPort);
    this.adapterImpl = (BasicRootedOzoneClientAdapterImpl) this.adapter;
    try {
      this.userName = UserGroupInformation.getCurrentUser().getShortUserName();
    } catch (IOException e) {
      // Fall back to the default user if the current user can't be resolved.
      this.userName = OZONE_DEFAULT_USER;
    }
    this.workingDir = new Path(OZONE_USER_DIR, this.userName).makeQualified(this.uri, this.workingDir);
  } catch (URISyntaxException ue) {
    final String msg = "Invalid Ozone endpoint " + name;
    LOG.error(msg, ue);
    throw new IOException(msg, ue);
  }
}
End of aggregated ConfigurationSource usage examples.