diff --git a/.gitignore b/.gitignore
index 1a67643fdd17..274a0740c85e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,5 +25,11 @@ linklint/
**/*.log
tmp
**/.flattened-pom.xml
+.sw*
+.*.sw*
+ID
+filenametags
+tags
+.codegenie/
.vscode/
**/__pycache__
diff --git a/.rubocop.yml b/.rubocop.yml
index f877a052eea6..e1eb10a9245b 100644
--- a/.rubocop.yml
+++ b/.rubocop.yml
@@ -9,3 +9,11 @@ Layout/LineLength:
Metrics/MethodLength:
Max: 75
+
+GlobalVars:
+ AllowedVariables:
+ - $CUST1_ENCODED
+ - $CUST1_ALIAS
+ - $GLOB_CUST_ENCODED
+ - $TEST
+ - $TEST_CLUSTER
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 1c08ec3b26fd..2b9149be9e05 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2664,4 +2664,28 @@ List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType, Server
@InterfaceAudience.Private
void restoreBackupSystemTable(String snapshotName) throws IOException;
+
+ /**
+ * Refresh the system key cache on all specified region servers.
+ * @param regionServers the list of region servers to refresh the system key cache on
+ */
+ void refreshSystemKeyCacheOnServers(List<ServerName> regionServers) throws IOException;
+
+ /**
+ * Eject a specific managed key entry from the managed key data cache on all specified region
+ * servers.
+ * @param regionServers the list of region servers to eject the managed key entry from
+ * @param keyCustodian the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadata the key metadata
+ */
+ void ejectManagedKeyDataCacheEntryOnServers(List<ServerName> regionServers, byte[] keyCustodian,
+ String keyNamespace, String keyMetadata) throws IOException;
+
+ /**
+ * Clear all entries in the managed key data cache on all specified region servers without having
+ * to restart the process.
+ * @param regionServers the list of region servers to clear the managed key data cache on
+ */
+ void clearManagedKeyDataCacheOnServers(List<ServerName> regionServers) throws IOException;
}
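
Usage sketch for the new Admin methods (not part of the patch; the custodian, namespace, and
metadata literals are illustrative placeholders):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeyCacheAdminExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
          Admin admin = conn.getAdmin()) {
          // Fan the calls out to every live region server.
          List<ServerName> servers = new ArrayList<>(admin.getRegionServers());
          // Reload the system key cache, e.g. after an STK rotation.
          admin.refreshSystemKeyCacheOnServers(servers);
          // Evict a single managed key entry by custodian, namespace and metadata.
          admin.ejectManagedKeyDataCacheEntryOnServers(servers, Bytes.toBytes("cust1"), "ns1",
            "key-metadata-json");
          // Or drop every cached entry without a process restart.
          admin.clearManagedKeyDataCacheOnServers(servers);
        }
      }
    }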
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
index e6bf6c3d28e0..42c1edb4c52e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
@@ -1146,4 +1146,21 @@ public List<String> getCachedFilesList(ServerName serverName) throws IOException
public void restoreBackupSystemTable(String snapshotName) throws IOException {
get(admin.restoreBackupSystemTable(snapshotName));
}
+
+ @Override
+ public void refreshSystemKeyCacheOnServers(List<ServerName> regionServers) throws IOException {
+ get(admin.refreshSystemKeyCacheOnServers(regionServers));
+ }
+
+ @Override
+ public void ejectManagedKeyDataCacheEntryOnServers(List<ServerName> regionServers,
+ byte[] keyCustodian, String keyNamespace, String keyMetadata) throws IOException {
+ get(admin.ejectManagedKeyDataCacheEntryOnServers(regionServers, keyCustodian, keyNamespace,
+ keyMetadata));
+ }
+
+ @Override
+ public void clearManagedKeyDataCacheOnServers(List<ServerName> regionServers) throws IOException {
+ get(admin.clearManagedKeyDataCacheOnServers(regionServers));
+ }
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index ec0556f20ac1..3c1f90f5bc40 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -1874,4 +1874,27 @@ CompletableFuture<List<LogEntry>> getLogEntries(Set<ServerName> serverNames, Str
@InterfaceAudience.Private
CompletableFuture<Void> restoreBackupSystemTable(String snapshotName);
+
+ /**
+ * Refresh the system key cache on all specified region servers.
+ * @param regionServers the list of region servers to refresh the system key cache on
+ */
+ CompletableFuture<Void> refreshSystemKeyCacheOnServers(List<ServerName> regionServers);
+
+ /**
+ * Eject a specific managed key entry from the managed key data cache on all specified region
+ * servers.
+ * @param regionServers the list of region servers to eject the managed key entry from
+ * @param keyCustodian the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadata the key metadata
+ */
+ CompletableFuture<Void> ejectManagedKeyDataCacheEntryOnServers(List<ServerName> regionServers,
+ byte[] keyCustodian, String keyNamespace, String keyMetadata);
+
+ /**
+ * Clear all entries in the managed key data cache on all specified region servers.
+ * @param regionServers the list of region servers to clear the managed key data cache on
+ */
+ CompletableFuture<Void> clearManagedKeyDataCacheOnServers(List<ServerName> regionServers);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index b1fb2be13547..dc54b5880bea 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -686,6 +686,23 @@ public CompletableFuture<Void> updateConfiguration(String groupName) {
return wrap(rawAdmin.updateConfiguration(groupName));
}
+ @Override
+ public CompletableFuture<Void> refreshSystemKeyCacheOnServers(List<ServerName> regionServers) {
+ return wrap(rawAdmin.refreshSystemKeyCacheOnServers(regionServers));
+ }
+
+ @Override
+ public CompletableFuture<Void> ejectManagedKeyDataCacheEntryOnServers(
+ List<ServerName> regionServers, byte[] keyCustodian, String keyNamespace, String keyMetadata) {
+ return wrap(rawAdmin.ejectManagedKeyDataCacheEntryOnServers(regionServers, keyCustodian,
+ keyNamespace, keyMetadata));
+ }
+
+ @Override
+ public CompletableFuture<Void> clearManagedKeyDataCacheOnServers(List<ServerName> regionServers) {
+ return wrap(rawAdmin.clearManagedKeyDataCacheOnServers(regionServers));
+ }
+
@Override
public CompletableFuture<Void> rollWALWriter(ServerName serverName) {
return wrap(rawAdmin.rollWALWriter(serverName));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
index 369b2be8ecda..ea8d81043694 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
@@ -114,6 +114,9 @@ public interface ColumnFamilyDescriptor {
/** Returns Return the raw crypto key attribute for the family, or null if not set */
byte[] getEncryptionKey();
+ /** Returns the encryption key namespace for this family */
+ String getEncryptionKeyNamespace();
+
/** Returns Return the encryption algorithm in use by this family */
String getEncryptionType();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 42f25fdc56f4..12bb73565078 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -167,6 +167,10 @@ public class ColumnFamilyDescriptorBuilder {
@InterfaceAudience.Private
public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
private static final Bytes ENCRYPTION_KEY_BYTES = new Bytes(Bytes.toBytes(ENCRYPTION_KEY));
+ @InterfaceAudience.Private
+ public static final String ENCRYPTION_KEY_NAMESPACE = "ENCRYPTION_KEY_NAMESPACE";
+ private static final Bytes ENCRYPTION_KEY_NAMESPACE_BYTES =
+ new Bytes(Bytes.toBytes(ENCRYPTION_KEY_NAMESPACE));
private static final boolean DEFAULT_MOB = false;
@InterfaceAudience.Private
@@ -320,6 +324,7 @@ public static Map<String, String> getDefaultValues() {
DEFAULT_VALUES.keySet().forEach(s -> RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s))));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
+ RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY_NAMESPACE)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(IS_MOB)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(MOB_THRESHOLD)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)));
@@ -522,6 +527,11 @@ public ColumnFamilyDescriptorBuilder setEncryptionKey(final byte[] value) {
return this;
}
+ public ColumnFamilyDescriptorBuilder setEncryptionKeyNamespace(final String value) {
+ desc.setEncryptionKeyNamespace(value);
+ return this;
+ }
+
public ColumnFamilyDescriptorBuilder setEncryptionType(String value) {
desc.setEncryptionType(value);
return this;
@@ -1337,6 +1347,20 @@ public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) {
return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes));
}
+ @Override
+ public String getEncryptionKeyNamespace() {
+ return getStringOrDefault(ENCRYPTION_KEY_NAMESPACE_BYTES, Function.identity(), null);
+ }
+
+ /**
+ * Set the encryption key namespace attribute for the family
+ * @param keyNamespace the key namespace, or null to remove existing setting
+ * @return this (for chained invocation)
+ */
+ public ModifyableColumnFamilyDescriptor setEncryptionKeyNamespace(String keyNamespace) {
+ return setValue(ENCRYPTION_KEY_NAMESPACE_BYTES, keyNamespace);
+ }
+
@Override
public long getMobThreshold() {
return getStringOrDefault(MOB_THRESHOLD_BYTES, Long::valueOf, DEFAULT_MOB_THRESHOLD);
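
A schema-level sketch of the new attribute (not part of the patch; the table, family and
namespace names are placeholders, and the cluster is assumed to have managed keys enabled):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EncryptedFamilyExample {
      public static TableDescriptor piiTable() {
        // ENCRYPTION still selects the cipher; ENCRYPTION_KEY_NAMESPACE is the new
        // attribute that routes key resolution through a managed key namespace.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("d"))
          .setEncryptionType("AES")
          .setEncryptionKeyNamespace("pii")
          .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")).setColumnFamily(cf)
          .build();
      }
    }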
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 710c8c430386..ce967b86bec4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -76,6 +76,7 @@
import org.apache.hadoop.hbase.client.replication.TableCFs;
import org.apache.hadoop.hbase.client.security.SecurityCapability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
@@ -150,7 +151,10 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyEntryRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
@@ -4662,4 +4666,86 @@ MasterProtos.RestoreBackupSystemTableResponse> procedureCall(request,
MasterProtos.RestoreBackupSystemTableResponse::getProcId,
new RestoreBackupSystemTableProcedureBiConsumer());
}
+
+ @Override
+ public CompletableFuture<Void> refreshSystemKeyCacheOnServers(List<ServerName> regionServers) {
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ List<CompletableFuture<Void>> futures =
+ regionServers.stream().map(this::refreshSystemKeyCache).collect(Collectors.toList());
+ addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])),
+ (result, err) -> {
+ if (err != null) {
+ future.completeExceptionally(err);
+ } else {
+ future.complete(result);
+ }
+ });
+ return future;
+ }
+
+ private CompletableFuture<Void> refreshSystemKeyCache(ServerName serverName) {
+ return this.<Void> newAdminCaller()
+ .action((controller, stub) -> this.<EmptyMsg, EmptyMsg, Void> adminCall(controller, stub,
+ EmptyMsg.getDefaultInstance(),
+ (s, c, req, done) -> s.refreshSystemKeyCache(controller, req, done), resp -> null))
+ .serverName(serverName).call();
+ }
+
+ @Override
+ public CompletableFuture<Void> ejectManagedKeyDataCacheEntryOnServers(
+ List<ServerName> regionServers, byte[] keyCustodian, String keyNamespace, String keyMetadata) {
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ // Create the request once instead of repeatedly for each server
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash(keyMetadata);
+ ManagedKeyEntryRequest request = ManagedKeyEntryRequest.newBuilder()
+ .setKeyCustNs(ManagedKeyRequest.newBuilder().setKeyCust(ByteString.copyFrom(keyCustodian))
+ .setKeyNamespace(keyNamespace).build())
+ .setKeyMetadataHash(ByteString.copyFrom(keyMetadataHash)).build();
+ List<CompletableFuture<Void>> futures =
+ regionServers.stream().map(serverName -> ejectManagedKeyDataCacheEntry(serverName, request))
+ .collect(Collectors.toList());
+ addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])),
+ (result, err) -> {
+ if (err != null) {
+ future.completeExceptionally(err);
+ } else {
+ future.complete(result);
+ }
+ });
+ return future;
+ }
+
+ private CompletableFuture<Void> ejectManagedKeyDataCacheEntry(ServerName serverName,
+ ManagedKeyEntryRequest request) {
+ return this.<Void> newAdminCaller()
+ .action((controller, stub) -> this.<ManagedKeyEntryRequest, EmptyMsg, Void> adminCall(controller, stub, request,
+ (s, c, req, done) -> s.ejectManagedKeyDataCacheEntry(controller, req, done),
+ resp -> null))
+ .serverName(serverName).call();
+ }
+
+ @Override
+ public CompletableFuture<Void> clearManagedKeyDataCacheOnServers(List<ServerName> regionServers) {
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ List<CompletableFuture<Void>> futures =
+ regionServers.stream().map(this::clearManagedKeyDataCache).collect(Collectors.toList());
+ addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])),
+ (result, err) -> {
+ if (err != null) {
+ future.completeExceptionally(err);
+ } else {
+ future.complete(result);
+ }
+ });
+ return future;
+ }
+
+ private CompletableFuture<Void> clearManagedKeyDataCache(ServerName serverName) {
+ return this.<Void> newAdminCaller()
+ .action((controller, stub) -> this.<EmptyMsg, EmptyMsg, Void> adminCall(controller, stub,
+ EmptyMsg.getDefaultInstance(),
+ (s, c, req, done) -> s.clearManagedKeyDataCache(controller, req, done), resp -> null))
+ .serverName(serverName).call();
+ }
}
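
The three *OnServers implementations above repeat the same allOf fan-out. A shared helper along
these lines (a sketch only, not in the patch) would express the pattern once:

    // Hypothetical private helper inside RawAsyncHBaseAdmin; assumes the existing
    // addListener utility and a java.util.function.Function import.
    private CompletableFuture<Void> onEachServer(List<ServerName> regionServers,
        Function<ServerName, CompletableFuture<Void>> perServerCall) {
      CompletableFuture<Void> future = new CompletableFuture<>();
      List<CompletableFuture<Void>> futures =
        regionServers.stream().map(perServerCall).collect(Collectors.toList());
      addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])),
        (result, err) -> {
          if (err != null) {
            future.completeExceptionally(err);
          } else {
            future.complete(result);
          }
        });
      return future;
    }

    // Each public method then reduces to one line, for example:
    // return onEachServer(regionServers, this::refreshSystemKeyCache);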
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
new file mode 100644
index 000000000000..8c60e1a8b292
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BooleanMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.GetManagedKeysResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyEntryRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ManagedKeysProtos;
+
+@InterfaceAudience.Public
+public class KeymetaAdminClient implements KeymetaAdmin {
+ private ManagedKeysProtos.ManagedKeysService.BlockingInterface stub;
+
+ public KeymetaAdminClient(Connection conn) throws IOException {
+ this.stub =
+ ManagedKeysProtos.ManagedKeysService.newBlockingStub(conn.getAdmin().coprocessorService());
+ }
+
+ @Override
+ public ManagedKeyData enableKeyManagement(byte[] keyCust, String keyNamespace)
+ throws IOException {
+ try {
+ ManagedKeyResponse response = stub.enableKeyManagement(null, ManagedKeyRequest.newBuilder()
+ .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build());
+ return generateKeyData(response);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ @Override
+ public List<ManagedKeyData> getManagedKeys(byte[] keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ try {
+ GetManagedKeysResponse statusResponse =
+ stub.getManagedKeys(null, ManagedKeyRequest.newBuilder()
+ .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build());
+ return generateKeyDataList(statusResponse);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ @Override
+ public boolean rotateSTK() throws IOException {
+ try {
+ BooleanMsg response = stub.rotateSTK(null, EmptyMsg.getDefaultInstance());
+ return response.getBoolMsg();
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ @Override
+ public void ejectManagedKeyDataCacheEntry(byte[] keyCustodian, String keyNamespace,
+ String keyMetadata) throws IOException {
+ throw new NotImplementedException(
+ "ejectManagedKeyDataCacheEntry not supported in KeymetaAdminClient");
+ }
+
+ @Override
+ public void clearManagedKeyDataCache() throws IOException {
+ throw new NotImplementedException(
+ "clearManagedKeyDataCache not supported in KeymetaAdminClient");
+ }
+
+ @Override
+ public ManagedKeyData disableKeyManagement(byte[] keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ try {
+ ManagedKeyResponse response = stub.disableKeyManagement(null, ManagedKeyRequest.newBuilder()
+ .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build());
+ return generateKeyData(response);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ @Override
+ public ManagedKeyData disableManagedKey(byte[] keyCust, String keyNamespace,
+ byte[] keyMetadataHash) throws IOException, KeyException {
+ try {
+ ManagedKeyResponse response = stub.disableManagedKey(null,
+ ManagedKeyEntryRequest.newBuilder()
+ .setKeyCustNs(ManagedKeyRequest.newBuilder().setKeyCust(ByteString.copyFrom(keyCust))
+ .setKeyNamespace(keyNamespace).build())
+ .setKeyMetadataHash(ByteString.copyFrom(keyMetadataHash)).build());
+ return generateKeyData(response);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ @Override
+ public ManagedKeyData rotateManagedKey(byte[] keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ try {
+ ManagedKeyResponse response = stub.rotateManagedKey(null, ManagedKeyRequest.newBuilder()
+ .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build());
+ return generateKeyData(response);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ @Override
+ public void refreshManagedKeys(byte[] keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ try {
+ stub.refreshManagedKeys(null, ManagedKeyRequest.newBuilder()
+ .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build());
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ private static List<ManagedKeyData> generateKeyDataList(GetManagedKeysResponse stateResponse) {
+ List<ManagedKeyData> keyStates = new ArrayList<>();
+ for (ManagedKeyResponse state : stateResponse.getStateList()) {
+ keyStates.add(generateKeyData(state));
+ }
+ return keyStates;
+ }
+
+ private static ManagedKeyData generateKeyData(ManagedKeyResponse response) {
+ // Use hash-only constructor for client-side ManagedKeyData
+ byte[] keyMetadataHash =
+ response.hasKeyMetadataHash() ? response.getKeyMetadataHash().toByteArray() : null;
+ if (keyMetadataHash == null) {
+ return new ManagedKeyData(response.getKeyCust().toByteArray(), response.getKeyNamespace(),
+ ManagedKeyState.forValue((byte) response.getKeyState().getNumber()));
+ } else {
+ return new ManagedKeyData(response.getKeyCust().toByteArray(), response.getKeyNamespace(),
+ ManagedKeyState.forValue((byte) response.getKeyState().getNumber()), keyMetadataHash,
+ response.getRefreshTimestamp());
+ }
+ }
+}
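
Usage sketch for the coprocessor-backed client (names are placeholders; assumes the master runs
the KeymetaServiceEndpoint below):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
    import org.apache.hadoop.hbase.keymeta.KeymetaAdminClient;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeymetaClientExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          KeymetaAdminClient keyAdmin = new KeymetaAdminClient(conn);
          // Turn key management on for a custodian/namespace pair.
          keyAdmin.enableKeyManagement(Bytes.toBytes("cust1"), "ns1");
          // Inspect the keys the master knows about.
          for (ManagedKeyData key : keyAdmin.getManagedKeys(Bytes.toBytes("cust1"), "ns1")) {
            System.out.println(key.getKeyNamespace() + " -> " + key.getKeyState());
          }
          // Ask the master to check for and propagate a rotated system key.
          keyAdmin.rotateSTK();
        }
      }
    }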
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 04fc5201cc10..05a1a4b0b66b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -27,7 +27,6 @@
import org.apache.commons.crypto.cipher.CryptoCipherFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES;
@@ -80,6 +79,21 @@ public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm)
* @return the encrypted key bytes
*/
public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException {
+ return wrapKey(conf, subject, key, null);
+ }
+
+ /**
+ * Protect a key by encrypting it with the secret key of the given subject, or with the given
+ * key encryption key (KEK). The configuration must be set up correctly for key alias
+ * resolution. Only one of {@code subject} and {@code kek} is required; the other can be null.
+ * @param conf configuration
+ * @param subject subject key alias
+ * @param key the key
+ * @param kek the key encryption key
+ * @return the encrypted key bytes
+ */
+ public static byte[] wrapKey(Configuration conf, String subject, Key key, Key kek)
+ throws IOException {
// Wrap the key with the configured encryption algorithm.
String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Cipher cipher = Encryption.getCipher(conf, algorithm);
@@ -100,8 +114,12 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws
builder
.setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes)));
ByteArrayOutputStream out = new ByteArrayOutputStream();
- Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher,
- iv);
+ if (kek != null) {
+ Encryption.encryptWithGivenKey(kek, out, new ByteArrayInputStream(keyBytes), cipher, iv);
+ } else {
+ Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf,
+ cipher, iv);
+ }
builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray()));
// Build and return the protobuf message
out.reset();
@@ -118,6 +136,21 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws
* @return the raw key bytes
*/
public static Key unwrapKey(Configuration conf, String subject, byte[] value)
+ throws IOException, KeyException {
+ return unwrapKey(conf, subject, value, null);
+ }
+
+ /**
+ * Unwrap a key by decrypting it with the secret key of the given subject, or with the given
+ * key encryption key (KEK). The configuration must be set up correctly for key alias
+ * resolution. Only one of {@code subject} and {@code kek} is required; the other can be null.
+ * @param conf configuration
+ * @param subject subject key alias
+ * @param value the encrypted key bytes
+ * @param kek the key encryption key
+ * @return the raw key bytes
+ */
+ public static Key unwrapKey(Configuration conf, String subject, byte[] value, Key kek)
throws IOException, KeyException {
EncryptionProtos.WrappedKey wrappedKey =
EncryptionProtos.WrappedKey.parser().parseDelimitedFrom(new ByteArrayInputStream(value));
@@ -126,11 +159,12 @@ public static Key unwrapKey(Configuration conf, String subject, byte[] value)
if (cipher == null) {
throw new RuntimeException("Cipher '" + algorithm + "' not available");
}
- return getUnwrapKey(conf, subject, wrappedKey, cipher);
+ return getUnwrapKey(conf, subject, wrappedKey, cipher, kek);
}
private static Key getUnwrapKey(Configuration conf, String subject,
- EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException {
+ EncryptionProtos.WrappedKey wrappedKey, Cipher cipher, Key kek)
+ throws IOException, KeyException {
String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf);
String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim();
if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) {
@@ -143,8 +177,13 @@ private static Key getUnwrapKey(Configuration conf, String subject,
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null;
- Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(),
- subject, conf, cipher, iv);
+ if (kek != null) {
+ Encryption.decryptWithGivenKey(kek, out, wrappedKey.getData().newInput(),
+ wrappedKey.getLength(), cipher, iv);
+ } else {
+ Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(),
+ subject, conf, cipher, iv);
+ }
byte[] keyBytes = out.toByteArray();
if (wrappedKey.hasHash()) {
if (
@@ -176,58 +215,7 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value)
if (cipher == null) {
throw new RuntimeException("Cipher '" + algorithm + "' not available");
}
- return getUnwrapKey(conf, subject, wrappedKey, cipher);
- }
-
- /**
- * Helper to create an encyption context.
- * @param conf The current configuration.
- * @param family The current column descriptor.
- * @return The created encryption context.
- * @throws IOException if an encryption key for the column cannot be unwrapped
- * @throws IllegalStateException in case of encryption related configuration errors
- */
- public static Encryption.Context createEncryptionContext(Configuration conf,
- ColumnFamilyDescriptor family) throws IOException {
- Encryption.Context cryptoContext = Encryption.Context.NONE;
- String cipherName = family.getEncryptionType();
- if (cipherName != null) {
- if (!Encryption.isEncryptionEnabled(conf)) {
- throw new IllegalStateException("Encryption for family '" + family.getNameAsString()
- + "' configured with type '" + cipherName + "' but the encryption feature is disabled");
- }
- Cipher cipher;
- Key key;
- byte[] keyBytes = family.getEncryptionKey();
- if (keyBytes != null) {
- // Family provides specific key material
- key = unwrapKey(conf, keyBytes);
- // Use the algorithm the key wants
- cipher = Encryption.getCipher(conf, key.getAlgorithm());
- if (cipher == null) {
- throw new IllegalStateException("Cipher '" + key.getAlgorithm() + "' is not available");
- }
- // Fail if misconfigured
- // We use the encryption type specified in the column schema as a sanity check on
- // what the wrapped key is telling us
- if (!cipher.getName().equalsIgnoreCase(cipherName)) {
- throw new IllegalStateException(
- "Encryption for family '" + family.getNameAsString() + "' configured with type '"
- + cipherName + "' but key specifies algorithm '" + cipher.getName() + "'");
- }
- } else {
- // Family does not provide key material, create a random key
- cipher = Encryption.getCipher(conf, cipherName);
- if (cipher == null) {
- throw new IllegalStateException("Cipher '" + cipherName + "' is not available");
- }
- key = cipher.getRandomKey();
- }
- cryptoContext = Encryption.newContext(conf);
- cryptoContext.setCipher(cipher);
- cryptoContext.setKey(key);
- }
- return cryptoContext;
+ return getUnwrapKey(conf, subject, wrappedKey, cipher, null);
}
/**
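
A round-trip sketch of the new KEK overloads (the all-zero key bytes are illustrative only; when
kek is non-null the subject is not consulted and may be null):

    import java.security.Key;
    import javax.crypto.spec.SecretKeySpec;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.EncryptionUtil;

    public class KekWrapExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Key kek = new SecretKeySpec(new byte[32], "AES"); // key encryption key
        Key dek = new SecretKeySpec(new byte[32], "AES"); // data key to protect
        byte[] wrapped = EncryptionUtil.wrapKey(conf, null, dek, kek);
        Key unwrapped = EncryptionUtil.unwrapKey(conf, null, wrapped, kek);
        System.out.println(unwrapped.getAlgorithm()); // AES
      }
    }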
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1051686d32e8..73637f0cd20e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -343,6 +343,7 @@ public enum OperationStatusCode {
/** Parameter name for HBase instance root directory */
public static final String HBASE_DIR = "hbase.rootdir";
+ public static final String HBASE_ORIGINAL_DIR = "hbase.originalRootdir";
/** Parameter name for HBase client IPC pool type */
public static final String HBASE_CLIENT_IPC_POOL_TYPE = "hbase.client.ipc.pool.type";
@@ -1193,6 +1194,11 @@ public enum OperationStatusCode {
/** Temporary directory used for table creation and deletion */
public static final String HBASE_TEMP_DIRECTORY = ".tmp";
+ /**
+ * Directory used for storing master keys for the cluster
+ */
+ public static final String SYSTEM_KEYS_DIRECTORY = ".system_keys";
+ public static final String SYSTEM_KEY_FILE_PREFIX = "system_key.";
/**
* The period (in milliseconds) between computing region server point in time metrics
*/
@@ -1282,6 +1288,14 @@ public enum OperationStatusCode {
public static final String CRYPTO_KEYPROVIDER_PARAMETERS_KEY =
"hbase.crypto.keyprovider.parameters";
+ /** Configuration key for the managed crypto key provider, a class name */
+ public static final String CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY =
+ "hbase.crypto.managed.keyprovider";
+
+ /** Configuration key for the managed crypto key provider parameters */
+ public static final String CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY =
+ "hbase.crypto.managed.keyprovider.parameters";
+
/** Configuration key for the name of the master key for the cluster, a string */
public static final String CRYPTO_MASTERKEY_NAME_CONF_KEY = "hbase.crypto.master.key.name";
@@ -1305,6 +1319,43 @@ public enum OperationStatusCode {
/** Configuration key for enabling WAL encryption, a boolean */
public static final String ENABLE_WAL_ENCRYPTION = "hbase.regionserver.wal.encryption";
+ /**
+ * Property used by ManagedKeyStoreKeyProvider class to set the alias that identifies the current
+ * system key.
+ */
+ public static final String CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY =
+ "hbase.crypto.managed_key_store.system.key.name";
+ public static final String CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX =
+ "hbase.crypto.managed_key_store.cust.";
+
+ /** Enables or disables the key management feature. */
+ public static final String CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY =
+ "hbase.crypto.managed_keys.enabled";
+ public static final boolean CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED = false;
+
+ /**
+ * Enables or disables key lookup during data path as an alternative to static injection of keys
+ * using control path.
+ */
+ public static final String CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY =
+ "hbase.crypto.managed_keys.dynamic_lookup.enabled";
+ public static final boolean CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_DEFAULT_ENABLED = true;
+
+ /** Maximum number of entries in the managed key data cache. */
+ public static final String CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY =
+ "hbase.crypto.managed_keys.l1_cache.max_entries";
+ public static final int CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT = 1000;
+
+ /** Maximum number of entries in the managed key active keys cache. */
+ public static final String CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY =
+ "hbase.crypto.managed_keys.l1_active_cache.max_ns_entries";
+ public static final int CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT = 100;
+
+ /** Enables or disables local key generation per file. */
+ public static final String CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY =
+ "hbase.crypto.managed_keys.local_key_gen_per_file.enabled";
+ public static final boolean CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_DEFAULT_ENABLED = false;
+
/** Configuration key for setting RPC codec class name */
public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec";
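
For reference, a sketch of setting the new knobs programmatically (the provider class name is a
hypothetical example, not something this patch ships):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class ManagedKeysConfigExample {
      public static Configuration managedKeysConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
        conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
        conf.setInt(HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY, 2000);
        // Hypothetical implementation class for the managed key provider.
        conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
          "org.example.MyManagedKeyProvider");
        return conf;
      }
    }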
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java
index ce32351fecdf..7e816b917628 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java
@@ -34,6 +34,8 @@ public class Context implements Configurable {
private Configuration conf;
private Cipher cipher;
private Key key;
+ private ManagedKeyData kekData;
+ private String keyNamespace;
private String keyHash;
Context(Configuration conf) {
@@ -97,4 +99,22 @@ public Context setKey(Key key) {
this.keyHash = new String(Hex.encodeHex(Encryption.computeCryptoKeyHash(conf, encoded)));
return this;
}
+
+ public Context setKeyNamespace(String keyNamespace) {
+ this.keyNamespace = keyNamespace;
+ return this;
+ }
+
+ public String getKeyNamespace() {
+ return keyNamespace;
+ }
+
+ public Context setKEKData(ManagedKeyData kekData) {
+ this.kekData = kekData;
+ return this;
+ }
+
+ public ManagedKeyData getKEKData() {
+ return kekData;
+ }
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 13e335b82ee3..56a6ad211731 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -29,12 +29,14 @@
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiFunction;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.util.Bytes;
@@ -116,12 +118,6 @@ public Context setCipher(Cipher cipher) {
return this;
}
- @Override
- public Context setKey(Key key) {
- super.setKey(key);
- return this;
- }
-
public Context setKey(byte[] key) {
super.setKey(new SecretKeySpec(key, getCipher().getName()));
return this;
@@ -468,6 +464,19 @@ public static void encryptWithSubjectKey(OutputStream out, InputStream in, Strin
if (key == null) {
throw new IOException("No key found for subject '" + subject + "'");
}
+ encryptWithGivenKey(key, out, in, cipher, iv);
+ }
+
+ /**
+ * Encrypts a block of plaintext with the specified symmetric key.
+ * @param key The symmetric key
+ * @param out ciphertext
+ * @param in plaintext
+ * @param cipher the encryption algorithm
+ * @param iv the initialization vector, can be null
+ */
+ public static void encryptWithGivenKey(Key key, OutputStream out, InputStream in, Cipher cipher,
+ byte[] iv) throws IOException {
Encryptor e = cipher.getEncryptor();
e.setKey(key);
e.setIv(iv); // can be null
@@ -490,36 +499,37 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o
if (key == null) {
throw new IOException("No key found for subject '" + subject + "'");
}
- Decryptor d = cipher.getDecryptor();
- d.setKey(key);
- d.setIv(iv); // can be null
try {
- decrypt(out, in, outLen, d);
+ decryptWithGivenKey(key, out, in, outLen, cipher, iv);
} catch (IOException e) {
// If the current cipher algorithm fails to unwrap, try the alternate cipher algorithm, if one
// is configured
String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY);
if (alternateAlgorithm != null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Unable to decrypt data with current cipher algorithm '"
- + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
- + "'. Trying with the alternate cipher algorithm '" + alternateAlgorithm
- + "' configured.");
- }
+ LOG.debug(
+ "Unable to decrypt data with current cipher algorithm '{}'. "
+ + "Trying with the alternate cipher algorithm '{}' configured.",
+ conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES),
+ alternateAlgorithm);
Cipher alterCipher = Encryption.getCipher(conf, alternateAlgorithm);
if (alterCipher == null) {
throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available");
}
- d = alterCipher.getDecryptor();
- d.setKey(key);
- d.setIv(iv); // can be null
- decrypt(out, in, outLen, d);
+ decryptWithGivenKey(key, out, in, outLen, alterCipher, iv);
} else {
- throw new IOException(e);
+ throw e;
}
}
}
+ public static void decryptWithGivenKey(Key key, OutputStream out, InputStream in, int outLen,
+ Cipher cipher, byte[] iv) throws IOException {
+ Decryptor d = cipher.getDecryptor();
+ d.setKey(key);
+ d.setIv(iv); // can be null
+ decrypt(out, in, outLen, d);
+ }
+
private static ClassLoader getClassLoaderForClass(Class<?> c) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
@@ -546,29 +556,50 @@ public static CipherProvider getCipherProvider(Configuration conf) {
}
}
- static final Map<Pair<String, String>, KeyProvider> keyProviderCache = new ConcurrentHashMap<>();
+ static final Map<Pair<String, String>, Object> keyProviderCache = new ConcurrentHashMap<>();
- public static KeyProvider getKeyProvider(Configuration conf) {
- String providerClassName =
- conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName());
- String providerParameters = conf.get(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "");
- try {
- Pair<String, String> providerCacheKey = new Pair<>(providerClassName, providerParameters);
- KeyProvider provider = keyProviderCache.get(providerCacheKey);
- if (provider != null) {
- return provider;
- }
- provider = (KeyProvider) ReflectionUtils
- .newInstance(getClassLoaderForClass(KeyProvider.class).loadClass(providerClassName), conf);
- provider.init(providerParameters);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Installed " + providerClassName + " into key provider cache");
+ private static Object createProvider(final Configuration conf, String classNameKey,
+ String parametersKey, Class<?> defaultProviderClass, ClassLoader classLoaderForClass,
+ BiFunction
+ */
+@CoreCoprocessor
+@InterfaceAudience.Private
+public class KeymetaServiceEndpoint implements MasterCoprocessor {
+ private static final Logger LOG = LoggerFactory.getLogger(KeymetaServiceEndpoint.class);
+
+ private MasterServices master = null;
+
+ private final ManagedKeysService managedKeysService = new KeymetaAdminServiceImpl();
+
+ /**
+ * Starts the coprocessor by initializing the reference to the
+ * {@link org.apache.hadoop.hbase.master.MasterServices} * instance.
+ * @param env The coprocessor environment.
+ * @throws IOException If an error occurs during initialization.
+ */
+ @Override
+ public void start(CoprocessorEnvironment env) throws IOException {
+ if (!(env instanceof HasMasterServices)) {
+ throw new IOException("Does not implement HMasterServices");
+ }
+
+ master = ((HasMasterServices) env).getMasterServices();
+ }
+
+ /**
+ * Returns an iterable of the available coprocessor services, which includes the
+ * {@link ManagedKeysService} implemented by
+ * {@link KeymetaServiceEndpoint.KeymetaAdminServiceImpl}.
+ * @return An iterable of the available coprocessor services.
+ */
+ @Override
+ public Iterable<Service> getServices() {
+ return Collections.singleton(managedKeysService);
+ }
+
+ /**
+ * The implementation of the {@link ManagedKeysProtos.ManagedKeysService} interface, which
+ * provides the actual method implementations for enabling key management.
+ */
+ @InterfaceAudience.Private
+ public class KeymetaAdminServiceImpl extends ManagedKeysService {
+
+ /**
+ * Enables key management for a given tenant and namespace, as specified in the provided
+ * request.
+ * @param controller The RPC controller.
+ * @param request The request containing the tenant and table specifications.
+ * @param done The callback to be invoked with the response.
+ */
+ @Override
+ public void enableKeyManagement(RpcController controller, ManagedKeyRequest request,
+ RpcCallback<ManagedKeyResponse> done) {
+ ManagedKeyResponse response = null;
+ ManagedKeyResponse.Builder builder = ManagedKeyResponse.newBuilder();
+ try {
+ initManagedKeyResponseBuilder(controller, request, builder);
+ ManagedKeyData managedKeyState = master.getKeymetaAdmin()
+ .enableKeyManagement(request.getKeyCust().toByteArray(), request.getKeyNamespace());
+ response = generateKeyStateResponse(managedKeyState, builder);
+ } catch (IOException | KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e));
+ builder.setKeyState(ManagedKeyState.KEY_FAILED);
+ }
+ if (response == null) {
+ response = builder.build();
+ }
+ done.run(response);
+ }
+
+ @Override
+ public void getManagedKeys(RpcController controller, ManagedKeyRequest request,
+ RpcCallback<GetManagedKeysResponse> done) {
+ GetManagedKeysResponse keyStateResponse = null;
+ ManagedKeyResponse.Builder builder = ManagedKeyResponse.newBuilder();
+ try {
+ initManagedKeyResponseBuilder(controller, request, builder);
+ List<ManagedKeyData> managedKeyStates = master.getKeymetaAdmin()
+ .getManagedKeys(request.getKeyCust().toByteArray(), request.getKeyNamespace());
+ keyStateResponse = generateKeyStateResponse(managedKeyStates, builder);
+ } catch (IOException | KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e));
+ }
+ if (keyStateResponse == null) {
+ keyStateResponse = GetManagedKeysResponse.getDefaultInstance();
+ }
+ done.run(keyStateResponse);
+ }
+
+ /**
+ * Rotates the system key (STK) by checking for a new key and propagating it to all region
+ * servers.
+ * @param controller The RPC controller.
+ * @param request The request (empty).
+ * @param done The callback to be invoked with the response.
+ */
+ @Override
+ public void rotateSTK(RpcController controller, EmptyMsg request,
+ RpcCallback<BooleanMsg> done) {
+ boolean rotated;
+ try {
+ rotated = master.getKeymetaAdmin().rotateSTK();
+ } catch (IOException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e));
+ rotated = false;
+ }
+ done.run(BooleanMsg.newBuilder().setBoolMsg(rotated).build());
+ }
+
+ /**
+ * Disables all managed keys for a given custodian and namespace.
+ * @param controller The RPC controller.
+ * @param request The request containing the custodian and namespace specifications.
+ * @param done The callback to be invoked with the response.
+ */
+ @Override
+ public void disableKeyManagement(RpcController controller, ManagedKeyRequest request,
+ RpcCallback<ManagedKeyResponse> done) {
+ ManagedKeyResponse response = null;
+ ManagedKeyResponse.Builder builder = ManagedKeyResponse.newBuilder();
+ try {
+ initManagedKeyResponseBuilder(controller, request, builder);
+ ManagedKeyData managedKeyState = master.getKeymetaAdmin()
+ .disableKeyManagement(request.getKeyCust().toByteArray(), request.getKeyNamespace());
+ response = generateKeyStateResponse(managedKeyState, builder);
+ } catch (IOException | KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e));
+ builder.setKeyState(ManagedKeyState.KEY_FAILED);
+ }
+ if (response == null) {
+ response = builder.build();
+ }
+ done.run(response);
+ }
+
+ /**
+ * Disables a specific managed key for a given custodian, namespace, and metadata.
+ * @param controller The RPC controller.
+ * @param request The request containing the custodian, namespace, and metadata
+ * specifications.
+ * @param done The callback to be invoked with the response.
+ */
+ @Override
+ public void disableManagedKey(RpcController controller, ManagedKeyEntryRequest request,
+ RpcCallback<ManagedKeyResponse> done) {
+ ManagedKeyResponse response = null;
+ ManagedKeyResponse.Builder builder = ManagedKeyResponse.newBuilder();
+ try {
+ initManagedKeyResponseBuilder(controller, request.getKeyCustNs(), builder);
+ // Convert hash to metadata by looking up the key first
+ byte[] keyMetadataHash = request.getKeyMetadataHash().toByteArray();
+ byte[] keyCust = request.getKeyCustNs().getKeyCust().toByteArray();
+ String keyNamespace = request.getKeyCustNs().getKeyNamespace();
+
+ ManagedKeyData managedKeyState =
+ master.getKeymetaAdmin().disableManagedKey(keyCust, keyNamespace, keyMetadataHash);
+ response = generateKeyStateResponse(managedKeyState, builder);
+ } catch (IOException | KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e));
+ builder.setKeyState(ManagedKeyState.KEY_FAILED);
+ }
+ if (response == null) {
+ response = builder.build();
+ }
+ done.run(response);
+ }
+
+ /**
+ * Rotates the managed key for a given custodian and namespace.
+ * @param controller The RPC controller.
+ * @param request The request containing the custodian and namespace specifications.
+ * @param done The callback to be invoked with the response.
+ */
+ @Override
+ public void rotateManagedKey(RpcController controller, ManagedKeyRequest request,
+ RpcCallback<ManagedKeyResponse> done) {
+ ManagedKeyResponse response = null;
+ ManagedKeyResponse.Builder builder = ManagedKeyResponse.newBuilder();
+ try {
+ initManagedKeyResponseBuilder(controller, request, builder);
+ ManagedKeyData managedKeyState = master.getKeymetaAdmin()
+ .rotateManagedKey(request.getKeyCust().toByteArray(), request.getKeyNamespace());
+ response = generateKeyStateResponse(managedKeyState, builder);
+ } catch (IOException | KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e));
+ builder.setKeyState(ManagedKeyState.KEY_FAILED);
+ }
+ if (response == null) {
+ response = builder.build();
+ }
+ done.run(response);
+ }
+
+ /**
+ * Refreshes all managed keys for a given custodian and namespace.
+ * @param controller The RPC controller.
+ * @param request The request containing the custodian and namespace specifications.
+ * @param done The callback to be invoked with the response.
+ */
+ @Override
+ public void refreshManagedKeys(RpcController controller, ManagedKeyRequest request,
+ RpcCallback<EmptyMsg> done) {
+ try {
+ // Do this just for validation.
+ initManagedKeyResponseBuilder(controller, request, ManagedKeyResponse.newBuilder());
+ master.getKeymetaAdmin().refreshManagedKeys(request.getKeyCust().toByteArray(),
+ request.getKeyNamespace());
+ } catch (IOException | KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e));
+ }
+ done.run(EmptyMsg.getDefaultInstance());
+ }
+ }
+
+ @InterfaceAudience.Private
+ public static ManagedKeyResponse.Builder initManagedKeyResponseBuilder(RpcController controller,
+ ManagedKeyRequest request, ManagedKeyResponse.Builder builder) throws IOException {
+ // We need to set this in advance to make sure builder has non-null values set.
+ builder.setKeyCust(request.getKeyCust());
+ builder.setKeyNamespace(request.getKeyNamespace());
+ if (request.getKeyCust().isEmpty()) {
+ throw new IOException("key_cust must not be empty");
+ }
+ if (request.getKeyNamespace().isEmpty()) {
+ throw new IOException("key_namespace must not be empty");
+ }
+ return builder;
+ }
+
+ // Assumes that all ManagedKeyData objects belong to the same custodian and namespace.
+ @InterfaceAudience.Private
+ public static GetManagedKeysResponse generateKeyStateResponse(
+ List<ManagedKeyData> managedKeyStates, ManagedKeyResponse.Builder builder) {
+ GetManagedKeysResponse.Builder responseBuilder = GetManagedKeysResponse.newBuilder();
+ for (ManagedKeyData keyData : managedKeyStates) {
+ responseBuilder.addState(generateKeyStateResponse(keyData, builder));
+ }
+ return responseBuilder.build();
+ }
+
+ private static ManagedKeyResponse generateKeyStateResponse(ManagedKeyData keyData,
+ ManagedKeyResponse.Builder builder) {
+ builder
+ .setKeyState(ManagedKeyState.forNumber(keyData.getKeyState().getExternalState().getVal()))
+ .setRefreshTimestamp(keyData.getRefreshTimestamp())
+ .setKeyNamespace(keyData.getKeyNamespace());
+
+ // Set metadata hash if available
+ byte[] metadataHash = keyData.getKeyMetadataHash();
+ if (metadataHash != null) {
+ builder.setKeyMetadataHash(ByteString.copyFrom(metadataHash));
+ }
+
+ return builder.build();
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
new file mode 100644
index 000000000000..4a291ff39d89
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+/**
+ * Accessor for keymeta table as part of key management.
+ */
+@InterfaceAudience.Private
+public class KeymetaTableAccessor extends KeyManagementBase {
+ private static final String KEY_META_INFO_FAMILY_STR = "info";
+
+ public static final byte[] KEY_META_INFO_FAMILY = Bytes.toBytes(KEY_META_INFO_FAMILY_STR);
+
+ public static final TableName KEY_META_TABLE_NAME =
+ TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "keymeta");
+
+ public static final String DEK_METADATA_QUAL_NAME = "m";
+ public static final byte[] DEK_METADATA_QUAL_BYTES = Bytes.toBytes(DEK_METADATA_QUAL_NAME);
+
+ public static final String DEK_CHECKSUM_QUAL_NAME = "c";
+ public static final byte[] DEK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(DEK_CHECKSUM_QUAL_NAME);
+
+ public static final String DEK_WRAPPED_BY_STK_QUAL_NAME = "w";
+ public static final byte[] DEK_WRAPPED_BY_STK_QUAL_BYTES =
+ Bytes.toBytes(DEK_WRAPPED_BY_STK_QUAL_NAME);
+
+ public static final String STK_CHECKSUM_QUAL_NAME = "s";
+ public static final byte[] STK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(STK_CHECKSUM_QUAL_NAME);
+
+ public static final String REFRESHED_TIMESTAMP_QUAL_NAME = "t";
+ public static final byte[] REFRESHED_TIMESTAMP_QUAL_BYTES =
+ Bytes.toBytes(REFRESHED_TIMESTAMP_QUAL_NAME);
+
+ public static final String KEY_STATE_QUAL_NAME = "k";
+ public static final byte[] KEY_STATE_QUAL_BYTES = Bytes.toBytes(KEY_STATE_QUAL_NAME);
+
+ public static final TableDescriptorBuilder TABLE_DESCRIPTOR_BUILDER =
+ TableDescriptorBuilder.newBuilder(KEY_META_TABLE_NAME).setRegionReplication(1)
+ .setPriority(HConstants.SYSTEMTABLE_QOS)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder
+ .newBuilder(KeymetaTableAccessor.KEY_META_INFO_FAMILY)
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setMaxVersions(1).setInMemory(true).build());
+
+ private Server server;
+
+ public KeymetaTableAccessor(Server server) {
+ super(server.getKeyManagementService());
+ this.server = server;
+ }
+
+ public Server getServer() {
+ return server;
+ }
+
+ /**
+ * Add the specified key to the keymeta table.
+ * @param keyData The key data.
+ * @throws IOException when there is an underlying IOException.
+ */
+ public void addKey(ManagedKeyData keyData) throws IOException {
+ assertKeyManagementEnabled();
+ List<Put> puts = new ArrayList<>(2);
+ if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
+ puts.add(addMutationColumns(new Put(constructRowKeyForCustNamespace(keyData)), keyData));
+ }
+ final Put putForMetadata =
+ addMutationColumns(new Put(constructRowKeyForMetadata(keyData)), keyData);
+ puts.add(putForMetadata);
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ table.put(puts);
+ }
+ }
+
+ /**
+ * Get all the keys for the specified keyCust and keyNamespace.
+ * @param keyCust The key custodian.
+ * @param keyNamespace The namespace
+ * @param includeMarkers Whether to include key management state markers in the result.
+ * @return a list of key data, one for each key, can be empty when none were found.
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public List<ManagedKeyData> getAllKeys(byte[] keyCust, String keyNamespace,
+ boolean includeMarkers) throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ Connection connection = getServer().getConnection();
+ byte[] prefixForScan = constructRowKeyForCustNamespace(keyCust, keyNamespace);
+ PrefixFilter prefixFilter = new PrefixFilter(prefixForScan);
+ Scan scan = new Scan();
+ scan.setFilter(prefixFilter);
+ scan.addFamily(KEY_META_INFO_FAMILY);
+
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ ResultScanner scanner = table.getScanner(scan);
+ Set<ManagedKeyData> allKeys = new LinkedHashSet<>();
+ for (Result result : scanner) {
+ ManagedKeyData keyData =
+ parseFromResult(getKeyManagementService(), keyCust, keyNamespace, result);
+ if (keyData != null && (includeMarkers || keyData.getKeyMetadata() != null)) {
+ allKeys.add(keyData);
+ }
+ }
+ return allKeys.stream().toList();
+ }
+ }
+
+ /**
+ * Get the key management state marker for the specified keyCust and keyNamespace.
+ * @param keyCust The key custodian.
+ * @param keyNamespace The namespace
+ * @return the key management state marker data, or null if no key management state marker found
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public ManagedKeyData getKeyManagementStateMarker(byte[] keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ return getKey(keyCust, keyNamespace, null);
+ }
+
+ /**
+ * Get the specific key identified by keyCust, keyNamespace and keyMetadataHash.
+ * @param keyCust The key custodian.
+ * @param keyNamespace The namespace.
+ * @param keyMetadataHash The metadata hash.
+ * @return the key or {@code null}
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public ManagedKeyData getKey(byte[] keyCust, String keyNamespace, byte[] keyMetadataHash)
+ throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ byte[] rowKey = keyMetadataHash != null
+ ? constructRowKeyForMetadata(keyCust, keyNamespace, keyMetadataHash)
+ : constructRowKeyForCustNamespace(keyCust, keyNamespace);
+ Result result = table.get(new Get(rowKey));
+ return parseFromResult(getKeyManagementService(), keyCust, keyNamespace, result);
+ }
+ }
+
+ /**
+ * Disables a key by removing the wrapped key and updating its state to DISABLED.
+ * @param keyData The key data to disable.
+ * @throws IOException when there is an underlying IOException.
+ */
+ public void disableKey(ManagedKeyData keyData) throws IOException {
+ assertKeyManagementEnabled();
+ Preconditions.checkNotNull(keyData.getKeyMetadata(), "Key metadata cannot be null");
+ byte[] keyCust = keyData.getKeyCustodian();
+ String keyNamespace = keyData.getKeyNamespace();
+ byte[] keyMetadataHash = keyData.getKeyMetadataHash();
+
+ List<Mutation> mutations = new ArrayList<>(3); // Max possible mutations.
+
+ if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
+ // Delete the CustNamespace row
+ byte[] rowKeyForCustNamespace = constructRowKeyForCustNamespace(keyCust, keyNamespace);
+ mutations.add(new Delete(rowKeyForCustNamespace).setDurability(Durability.SKIP_WAL)
+ .setPriority(HConstants.SYSTEMTABLE_QOS));
+ }
+
+ // Update state to DISABLED and timestamp on Metadata row
+ byte[] rowKeyForMetadata = constructRowKeyForMetadata(keyCust, keyNamespace, keyMetadataHash);
+ addMutationsForKeyDisabled(mutations, rowKeyForMetadata, keyData.getKeyMetadata(),
+ keyData.getKeyState() == ManagedKeyState.ACTIVE
+ ? ManagedKeyState.ACTIVE_DISABLED
+ : ManagedKeyState.INACTIVE_DISABLED,
+ keyData.getKeyState());
+
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ table.batch(mutations, null);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new IOException("Interrupted while disabling key", e);
+ }
+ }
+
+ private void addMutationsForKeyDisabled(List<Mutation> mutations, byte[] rowKey, String metadata,
+ ManagedKeyState targetState, ManagedKeyState currentState) {
+ Put put = new Put(rowKey);
+ if (metadata != null) {
+ put.addColumn(KEY_META_INFO_FAMILY, DEK_METADATA_QUAL_BYTES, metadata.getBytes());
+ }
+ Put putForState = addMutationColumnsForState(put, targetState);
+ mutations.add(putForState);
+
+ // Delete wrapped key columns from Metadata row
+ if (currentState == null || ManagedKeyState.isUsable(currentState)) {
+ Delete deleteWrappedKey = new Delete(rowKey).setDurability(Durability.SKIP_WAL)
+ .setPriority(HConstants.SYSTEMTABLE_QOS)
+ .addColumns(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES)
+ .addColumns(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES)
+ .addColumns(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES);
+ mutations.add(deleteWrappedKey);
+ }
+ }
+
+ /**
+ * Adds a key management state marker to the specified (keyCust, keyNamespace) combination. It
+ * also adds delete markers for the columns unrelated to the marker, in case the state is
+ * transitioning from ACTIVE to DISABLED or FAILED. This method is only used for setting the state
+ * to DISABLED or FAILED; for the ACTIVE state, the addKey() method implicitly adds the marker.
+ * @param keyCust The key custodian.
+ * @param keyNamespace The namespace.
+ * @param state The key management state to add.
+ * @throws IOException when there is an underlying IOException.
+ */
+ public void addKeyManagementStateMarker(byte[] keyCust, String keyNamespace,
+ ManagedKeyState state) throws IOException {
+ assertKeyManagementEnabled();
+ Preconditions.checkArgument(ManagedKeyState.isKeyManagementState(state),
+ "State must be a key management state, got: " + state);
+ List<Mutation> mutations = new ArrayList<>(2);
+ byte[] rowKey = constructRowKeyForCustNamespace(keyCust, keyNamespace);
+ addMutationsForKeyDisabled(mutations, rowKey, null, state, null);
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ table.batch(mutations, null);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new IOException("Interrupted while adding key management state marker", e);
+ }
+ }
+
+ /**
+ * Updates the state of a key to one of the ACTIVE or INACTIVE states. The current state can be
+ * any state; if it is the same as the new state, the call is a no-op.
+ * @param keyData The key data.
+ * @param newState The new state (must be ACTIVE or INACTIVE).
+ * @throws IOException when there is an underlying IOException.
+ */
+ public void updateActiveState(ManagedKeyData keyData, ManagedKeyState newState)
+ throws IOException {
+ assertKeyManagementEnabled();
+ ManagedKeyState currentState = keyData.getKeyState();
+
+ // Validate states
+ Preconditions.checkArgument(ManagedKeyState.isUsable(newState),
+ "New state must be ACTIVE or INACTIVE, got: " + newState);
+ // Even for FAILED keys, we expect the metadata to be non-null.
+ Preconditions.checkNotNull(keyData.getKeyMetadata(), "Key metadata cannot be null");
+
+ // No-op if states are the same
+ if (currentState == newState) {
+ return;
+ }
+
+ List<Mutation> mutations = new ArrayList<>(2);
+ byte[] rowKeyForCustNamespace = constructRowKeyForCustNamespace(keyData);
+ byte[] rowKeyForMetadata = constructRowKeyForMetadata(keyData);
+
+ // First take care of the active key specific row.
+ if (newState == ManagedKeyState.ACTIVE) {
+ // INACTIVE -> ACTIVE: Add CustNamespace row and update Metadata row
+ mutations.add(addMutationColumns(new Put(rowKeyForCustNamespace), keyData));
+ }
+ if (currentState == ManagedKeyState.ACTIVE) {
+ mutations.add(new Delete(rowKeyForCustNamespace).setDurability(Durability.SKIP_WAL)
+ .setPriority(HConstants.SYSTEMTABLE_QOS));
+ }
+
+ // Now take care of the key specific row (for point gets by metadata).
+ if (!ManagedKeyState.isUsable(currentState)) {
+ // For DISABLED and FAILED keys, we don't expect cached key material, so add all columns
+ // similar to what addKey() does.
+ mutations.add(addMutationColumns(new Put(rowKeyForMetadata), keyData));
+ } else {
+ // We expect cached key material, so only update the state and timestamp columns.
+ mutations.add(addMutationColumnsForState(new Put(rowKeyForMetadata), newState));
+ }
+
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ table.batch(mutations, null);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new IOException("Interrupted while updating active state", e);
+ }
+ }
+
+ private Put addMutationColumnsForState(Put put, ManagedKeyState newState) {
+ return addMutationColumnsForState(put, newState, EnvironmentEdgeManager.currentTime());
+ }
+
+ /**
+ * Add only state and timestamp columns to the given Put.
+ */
+ private Put addMutationColumnsForState(Put put, ManagedKeyState newState, long timestamp) {
+ return put.setDurability(Durability.SKIP_WAL).setPriority(HConstants.SYSTEMTABLE_QOS)
+ .addColumn(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES, new byte[] { newState.getVal() })
+ .addColumn(KEY_META_INFO_FAMILY, REFRESHED_TIMESTAMP_QUAL_BYTES, Bytes.toBytes(timestamp));
+ }
+
+ /**
+ * Add the mutation columns to the given Put that are derived from the keyData.
+ */
+ private Put addMutationColumns(Put put, ManagedKeyData keyData) throws IOException {
+ ManagedKeyData latestSystemKey =
+ getKeyManagementService().getSystemKeyCache().getLatestSystemKey();
+ if (keyData.getTheKey() != null) {
+ byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getConfiguration(), null, keyData.getTheKey(),
+ latestSystemKey.getTheKey());
+ put
+ .addColumn(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES,
+ Bytes.toBytes(keyData.getKeyChecksum()))
+ .addColumn(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES, dekWrappedBySTK)
+ .addColumn(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES,
+ Bytes.toBytes(latestSystemKey.getKeyChecksum()));
+ }
+ Put result =
+ addMutationColumnsForState(put, keyData.getKeyState(), keyData.getRefreshTimestamp());
+
+ // Only add metadata column if metadata is not null
+ String metadata = keyData.getKeyMetadata();
+ if (metadata != null) {
+ result.addColumn(KEY_META_INFO_FAMILY, DEK_METADATA_QUAL_BYTES, metadata.getBytes());
+ }
+
+ return result;
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForMetadata(ManagedKeyData keyData) {
+ Preconditions.checkNotNull(keyData.getKeyMetadata(), "Key metadata cannot be null");
+ return constructRowKeyForMetadata(keyData.getKeyCustodian(), keyData.getKeyNamespace(),
+ keyData.getKeyMetadataHash());
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForMetadata(byte[] keyCust, String keyNamespace,
+ byte[] keyMetadataHash) {
+ return Bytes.add(constructRowKeyForCustNamespace(keyCust, keyNamespace), keyMetadataHash);
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForCustNamespace(ManagedKeyData keyData) {
+ return constructRowKeyForCustNamespace(keyData.getKeyCustodian(), keyData.getKeyNamespace());
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForCustNamespace(byte[] keyCust, String keyNamespace) {
+ int custLength = keyCust.length;
+ return Bytes.add(Bytes.toBytes(custLength), keyCust, Bytes.toBytes(keyNamespace));
+ }
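For reference, the row keys produced by the two helpers above lay out as a 4-byte big-endian custodian length, the custodian bytes, and the UTF-8 namespace; metadata rows append the metadata hash to that prefix, which is why a PrefixFilter on the custodian/namespace prefix matches both the marker row and all per-key rows. A minimal sketch of the same layout in plain Java (ByteBuffer standing in for Bytes.add/Bytes.toBytes, which behave equivalently here):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class RowKeyLayoutSketch {
      // Equivalent of constructRowKeyForCustNamespace: 4-byte length + custodian + namespace.
      static byte[] custNamespaceKey(byte[] keyCust, String keyNamespace) {
        byte[] ns = keyNamespace.getBytes(StandardCharsets.UTF_8);
        return ByteBuffer.allocate(4 + keyCust.length + ns.length)
          .putInt(keyCust.length).put(keyCust).put(ns).array();
      }

      // Equivalent of constructRowKeyForMetadata: the prefix above + metadata hash.
      static byte[] metadataKey(byte[] keyCust, String keyNamespace, byte[] metadataHash) {
        byte[] prefix = custNamespaceKey(keyCust, keyNamespace);
        return ByteBuffer.allocate(prefix.length + metadataHash.length)
          .put(prefix).put(metadataHash).array();
      }
    }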
+
+ @InterfaceAudience.Private
+ public static ManagedKeyData parseFromResult(KeyManagementService keyManagementService,
+ byte[] keyCust, String keyNamespace, Result result) throws IOException, KeyException {
+ if (result == null || result.isEmpty()) {
+ return null;
+ }
+ ManagedKeyState keyState =
+ ManagedKeyState.forValue(result.getValue(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES)[0]);
+ String dekMetadata =
+ Bytes.toString(result.getValue(KEY_META_INFO_FAMILY, DEK_METADATA_QUAL_BYTES));
+ byte[] dekWrappedByStk = result.getValue(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES);
+ if (
+ (keyState == ManagedKeyState.ACTIVE || keyState == ManagedKeyState.INACTIVE)
+ && dekWrappedByStk == null
+ ) {
+ throw new IOException(keyState + " key must have a wrapped key");
+ }
+ Key dek = null;
+ if (dekWrappedByStk != null) {
+ long stkChecksum =
+ Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES));
+ ManagedKeyData clusterKey =
+ keyManagementService.getSystemKeyCache().getSystemKeyByChecksum(stkChecksum);
+ if (clusterKey == null) {
+ LOG.error("Dropping key with metadata: {} as STK with checksum: {} is unavailable",
+ dekMetadata, stkChecksum);
+ return null;
+ }
+ dek = EncryptionUtil.unwrapKey(keyManagementService.getConfiguration(), null, dekWrappedByStk,
+ clusterKey.getTheKey());
+ }
+ long refreshedTimestamp =
+ Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, REFRESHED_TIMESTAMP_QUAL_BYTES));
+ ManagedKeyData dekKeyData;
+ if (dekMetadata != null) {
+ dekKeyData =
+ new ManagedKeyData(keyCust, keyNamespace, dek, keyState, dekMetadata, refreshedTimestamp);
+ if (dek != null) {
+ long dekChecksum =
+ Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES));
+ if (dekKeyData.getKeyChecksum() != dekChecksum) {
+ LOG.error(
+ "Dropping key, current key checksum: {} didn't match the expected checksum: {}"
+ + " for key with metadata: {}",
+ dekKeyData.getKeyChecksum(), dekChecksum, dekMetadata);
+ dekKeyData = null;
+ }
+ }
+ } else {
+ // Key management marker.
+ dekKeyData = new ManagedKeyData(keyCust, keyNamespace, keyState, refreshedTimestamp);
+ }
+ return dekKeyData;
+ }
+}
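Putting KeymetaTableAccessor together, a server-side caller would typically use it as below (a hedged sketch, not part of the patch; `server` is any org.apache.hadoop.hbase.Server with key management enabled, `activeKeyData` is an ACTIVE key already obtained from the provider, and exception handling is elided):

    KeymetaTableAccessor accessor = new KeymetaTableAccessor(server);
    // Writes both the custodian/namespace row and the metadata row for an ACTIVE key.
    accessor.addKey(activeKeyData);

    // Point lookup by metadata hash, then a full listing without state markers.
    ManagedKeyData key =
      accessor.getKey(keyCust, "ns1", activeKeyData.getKeyMetadataHash());
    List<ManagedKeyData> all = accessor.getAllKeys(keyCust, "ns1", false);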
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
new file mode 100644
index 000000000000..f93706690ded
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
@@ -0,0 +1,339 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * In-memory cache for ManagedKeyData entries, using the key metadata hash as the cache key. Uses
+ * two independent Caffeine caches: one for general key data and one for active keys only, keyed
+ * by (custodian, namespace) for efficient single-key retrieval.
+ */
+@InterfaceAudience.Private
+public class ManagedKeyDataCache extends KeyManagementBase {
+ private static final Logger LOG = LoggerFactory.getLogger(ManagedKeyDataCache.class);
+
+ private Cache<Bytes, ManagedKeyData> cacheByMetadataHash; // Key is Bytes wrapper around hash
+ private Cache<ActiveKeysCacheKey, ManagedKeyData> activeKeysCache;
+ private final KeymetaTableAccessor keymetaAccessor;
+
+ /**
+ * Composite key for the active keys cache, containing custodian and namespace. NOTE: Pair won't
+ * work out of the box because its equals/hashCode compare byte[] by reference.
+ */
+ @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.UNITTEST })
+ public static class ActiveKeysCacheKey {
+ private final byte[] custodian;
+ private final String namespace;
+
+ public ActiveKeysCacheKey(byte[] custodian, String namespace) {
+ this.custodian = custodian;
+ this.namespace = namespace;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ ActiveKeysCacheKey cacheKey = (ActiveKeysCacheKey) obj;
+ return Bytes.equals(custodian, cacheKey.custodian)
+ && Objects.equals(namespace, cacheKey.namespace);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(Bytes.hashCode(custodian), namespace);
+ }
+ }
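The NOTE above is worth making concrete: Pair (and java.util.Objects.equals) compares byte[] fields by reference, so two keys built from equal custodian bytes would never match in the map. A standalone illustration of the pitfall ActiveKeysCacheKey avoids:

    byte[] a = { 1, 2, 3 };
    byte[] b = { 1, 2, 3 };
    System.out.println(java.util.Objects.equals(a, b)); // false: reference equality
    System.out.println(java.util.Arrays.equals(a, b));  // true: content equality
    // ActiveKeysCacheKey delegates to Bytes.equals/Bytes.hashCode, giving content semantics.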
+
+ /**
+ * Constructs the ManagedKeyDataCache with the given configuration and keymeta accessor. When
+ * keymetaAccessor is null, L2 lookup is disabled and dynamic lookup is enabled.
+ * @param conf The configuration, can't be null.
+ * @param keymetaAccessor The keymeta accessor, can be null.
+ */
+ public ManagedKeyDataCache(Configuration conf, KeymetaTableAccessor keymetaAccessor) {
+ super(conf);
+ this.keymetaAccessor = keymetaAccessor;
+ if (keymetaAccessor == null) {
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
+ }
+
+ int maxEntries = conf.getInt(HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT);
+ int activeKeysMaxEntries =
+ conf.getInt(HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT);
+ this.cacheByMetadataHash = Caffeine.newBuilder().maximumSize(maxEntries).build();
+ this.activeKeysCache = Caffeine.newBuilder().maximumSize(activeKeysMaxEntries).build();
+ }
+
+ /**
+ * Retrieves an entry from the cache if it already exists; otherwise null is returned. No
+ * attempt is made to load from L2 or the provider.
+ * @return the corresponding ManagedKeyData entry, or null if not found
+ */
+ public ManagedKeyData getEntry(byte[] keyCust, String keyNamespace, byte[] keyMetadataHash)
+ throws IOException, KeyException {
+ Bytes metadataHashKey = new Bytes(keyMetadataHash);
+ // Return the entry if it exists in the generic cache or active keys cache, otherwise return
+ // null.
+ ManagedKeyData entry = cacheByMetadataHash.get(metadataHashKey, hashKey -> {
+ return getFromActiveKeysCache(keyCust, keyNamespace, keyMetadataHash);
+ });
+ return entry;
+ }
+
+ /**
+ * Retrieves an entry from the cache, loading it from L2 if KeymetaTableAccessor is available.
+ * When L2 is not available, it will try to load from provider, unless dynamic lookup is disabled.
+ * @param keyCust the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadata the key metadata of the entry to be retrieved
+ * @param wrappedKey The DEK key material encrypted with the corresponding KEK, if available.
+ * @return the corresponding ManagedKeyData entry, or null if not found
+ * @throws IOException if an error occurs while loading from KeymetaTableAccessor
+ * @throws KeyException if an error occurs while loading from KeymetaTableAccessor
+ */
+ public ManagedKeyData getEntry(byte[] keyCust, String keyNamespace, String keyMetadata,
+ byte[] wrappedKey) throws IOException, KeyException {
+ // Compute hash and use it as cache key
+ byte[] metadataHashBytes = ManagedKeyData.constructMetadataHash(keyMetadata);
+ Bytes metadataHashKey = new Bytes(metadataHashBytes);
+
+ ManagedKeyData entry = cacheByMetadataHash.get(metadataHashKey, hashKey -> {
+ // First check if it's in the active keys cache
+ ManagedKeyData keyData = getFromActiveKeysCache(keyCust, keyNamespace, metadataHashBytes);
+
+ // Try to load from L2
+ if (keyData == null && keymetaAccessor != null) {
+ try {
+ keyData = keymetaAccessor.getKey(keyCust, keyNamespace, metadataHashBytes);
+ } catch (IOException | KeyException e) {
+ LOG.warn(
+ "Failed to load key from L2 for (custodian: {}, namespace: {}) with metadata hash: {}",
+ ManagedKeyProvider.encodeToStr(keyCust), keyNamespace,
+ ManagedKeyProvider.encodeToStr(metadataHashBytes), e);
+ }
+ }
+
+ // If not found in L2 and dynamic lookup is enabled, try with Key Provider
+ if (keyData == null && isDynamicLookupEnabled()) {
+ String encKeyCust = ManagedKeyProvider.encodeToStr(keyCust);
+ try {
+ keyData = KeyManagementUtils.retrieveKey(getKeyProvider(), keymetaAccessor, encKeyCust,
+ keyCust, keyNamespace, keyMetadata, wrappedKey);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn(
+ "Failed to retrieve key from provider for (custodian: {}, namespace: {}) with "
+ + "metadata hash: {}", ManagedKeyProvider.encodeToStr(keyCust), keyNamespace,
+ ManagedKeyProvider.encodeToStr(metadataHashBytes), e);
+ }
+ }
+
+ if (keyData == null) {
+ keyData =
+ new ManagedKeyData(keyCust, keyNamespace, null, ManagedKeyState.FAILED, keyMetadata);
+ }
+
+ // Also update activeKeysCache if relevant and is missing.
+ if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
+ activeKeysCache.asMap().putIfAbsent(new ActiveKeysCacheKey(keyCust, keyNamespace), keyData);
+ }
+
+ return keyData;
+ });
+
+ // Verify custodian/namespace match to guard against hash collisions
+ if (entry != null && ManagedKeyState.isUsable(entry.getKeyState())) {
+ if (
+ Bytes.equals(entry.getKeyCustodian(), keyCust)
+ && entry.getKeyNamespace().equals(keyNamespace)
+ ) {
+ return entry;
+ }
+ LOG.warn(
+ "Hash collision or incorrect/mismatched custodian/namespace detected for metadata hash: "
+ + "{} - custodian/namespace mismatch expected: ({}, {}), actual: ({}, {})",
+ ManagedKeyProvider.encodeToStr(metadataHashBytes), ManagedKeyProvider.encodeToStr(keyCust),
+ keyNamespace, ManagedKeyProvider.encodeToStr(entry.getKeyCustodian()),
+ entry.getKeyNamespace());
+ }
+ return null;
+ }
+
+ /**
+ * Retrieves an existing key from the active keys cache.
+ * @param keyCust the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadataHash the key metadata hash
+ * @return the ManagedKeyData if found, null otherwise
+ */
+ private ManagedKeyData getFromActiveKeysCache(byte[] keyCust, String keyNamespace,
+ byte[] keyMetadataHash) {
+ ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(keyCust, keyNamespace);
+ ManagedKeyData keyData = activeKeysCache.getIfPresent(cacheKey);
+ if (keyData != null && Bytes.equals(keyData.getKeyMetadataHash(), keyMetadataHash)) {
+ return keyData;
+ }
+ return null;
+ }
+
+ /**
+ * Eject the key identified by the given custodian, namespace and metadata from both the active
+ * keys cache and the generic cache.
+ * @param keyCust the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadataHash the key metadata hash
+ * @return true if the key was ejected from either cache, false otherwise
+ */
+ public boolean ejectKey(byte[] keyCust, String keyNamespace, byte[] keyMetadataHash) {
+ Bytes keyMetadataHashKey = new Bytes(keyMetadataHash);
+ ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(keyCust, keyNamespace);
+ AtomicBoolean ejected = new AtomicBoolean(false);
+ AtomicReference<ManagedKeyData> rejectedValue = new AtomicReference<>(null);
+
+ Function<ManagedKeyData, ManagedKeyData> conditionalCompute = (value) -> {
+ if (rejectedValue.get() != null) {
+ return value;
+ }
+ if (
+ Bytes.equals(value.getKeyMetadataHash(), keyMetadataHash)
+ && Bytes.equals(value.getKeyCustodian(), keyCust)
+ && value.getKeyNamespace().equals(keyNamespace)
+ ) {
+ ejected.set(true);
+ return null;
+ }
+ rejectedValue.set(value);
+ return value;
+ };
+
+ // Try to eject from active keys cache by matching hash with collision check
+ activeKeysCache.asMap().computeIfPresent(cacheKey,
+ (key, value) -> conditionalCompute.apply(value));
+
+ // Also remove from generic cache by hash, with collision check
+ cacheByMetadataHash.asMap().computeIfPresent(keyMetadataHashKey,
+ (hash, value) -> conditionalCompute.apply(value));
+
+ if (rejectedValue.get() != null) {
+ LOG.warn(
+ "Hash collision or incorrect/mismatched custodian/namespace detected for metadata "
+ + "hash: {} - custodian/namespace mismatch expected: ({}, {}), actual: ({}, {})",
+ ManagedKeyProvider.encodeToStr(keyMetadataHash), ManagedKeyProvider.encodeToStr(keyCust),
+ keyNamespace, ManagedKeyProvider.encodeToStr(rejectedValue.get().getKeyCustodian()),
+ rejectedValue.get().getKeyNamespace());
+ }
+
+ return ejected.get();
+ }
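As a usage sketch (variable names hypothetical), ejection is keyed by the metadata hash but guarded by custodian and namespace, so a hash collision never evicts a foreign entry:

    byte[] hash = keyData.getKeyMetadataHash();
    boolean ejected =
      cache.ejectKey(keyData.getKeyCustodian(), keyData.getKeyNamespace(), hash);
    // false either when nothing was cached under the hash, or when the cached entry
    // belonged to a different (custodian, namespace) pair and was intentionally kept.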
+
+ /**
+ * Clear all the cached entries.
+ */
+ public void clearCache() {
+ cacheByMetadataHash.invalidateAll();
+ activeKeysCache.invalidateAll();
+ }
+
+ /**
+ * @return the approximate number of entries in the main cache which is meant for general lookup
+ * by key metadata hash.
+ */
+ public int getGenericCacheEntryCount() {
+ return (int) cacheByMetadataHash.estimatedSize();
+ }
+
+ /** Returns the approximate number of entries in the active keys cache */
+ public int getActiveCacheEntryCount() {
+ return (int) activeKeysCache.estimatedSize();
+ }
+
+ /**
+ * Retrieves the active entry from the cache based on its key custodian and key namespace. This
+ * method also loads active keys from provider if not found in cache.
+ * @param keyCust The key custodian.
+ * @param keyNamespace the key namespace to search for
+ * @return the ManagedKeyData entry with the given custodian and ACTIVE status, or null if not
+ * found
+ */
+ public ManagedKeyData getActiveEntry(byte[] keyCust, String keyNamespace) {
+ ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(keyCust, keyNamespace);
+
+ ManagedKeyData keyData = activeKeysCache.get(cacheKey, key -> {
+ ManagedKeyData retrievedKey = null;
+
+ // Try to load from KeymetaTableAccessor if not found in cache
+ if (keymetaAccessor != null) {
+ try {
+ retrievedKey = keymetaAccessor.getKeyManagementStateMarker(keyCust, keyNamespace);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn("Failed to load active key from KeymetaTableAccessor for custodian: {} "
+ + "namespace: {}", ManagedKeyProvider.encodeToStr(keyCust), keyNamespace, e);
+ }
+ }
+
+ // As a last-ditch effort, load the active key from the provider. This typically happens for
+ // standalone tools.
+ if (retrievedKey == null && isDynamicLookupEnabled()) {
+ try {
+ String keyCustEnc = ManagedKeyProvider.encodeToStr(keyCust);
+ retrievedKey = KeyManagementUtils.retrieveActiveKey(getKeyProvider(), keymetaAccessor,
+ keyCustEnc, keyCust, keyNamespace, null);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn("Failed to load active key from provider for custodian: {} namespace: {}",
+ ManagedKeyProvider.encodeToStr(keyCust), keyNamespace, e);
+ }
+ }
+
+ if (retrievedKey == null) {
+ retrievedKey = new ManagedKeyData(keyCust, keyNamespace, ManagedKeyState.FAILED);
+ }
+
+ return retrievedKey;
+ });
+
+ // This should never be null, but adding a check just to satisfy spotbugs.
+ if (keyData != null && keyData.getKeyState() == ManagedKeyState.ACTIVE) {
+ return keyData;
+ }
+ return null;
+ }
+}
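A hedged end-to-end sketch of how a caller might drive the cache (wiring as in this patch; the conf, accessor, and key identifiers are assumed to exist in the caller's scope):

    ManagedKeyDataCache cache = new ManagedKeyDataCache(conf, keymetaAccessor);

    // Active key for a (custodian, namespace), loading through L2/provider on a miss.
    ManagedKeyData active = cache.getActiveEntry(keyCust, "ns1");

    // Point lookup by metadata, falling back to L2 and then, when dynamic lookup is
    // enabled, to the key provider; a FAILED placeholder is cached on a total miss.
    ManagedKeyData byMeta = cache.getEntry(keyCust, "ns1", keyMetadata, null);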
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
new file mode 100644
index 000000000000..8de01319e25b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class SystemKeyAccessor extends KeyManagementBase {
+ private static final Logger LOG = LoggerFactory.getLogger(SystemKeyAccessor.class);
+
+ private final FileSystem fs;
+ protected final Path systemKeyDir;
+
+ public SystemKeyAccessor(Server server) throws IOException {
+ this(server.getConfiguration(), server.getFileSystem());
+ }
+
+ public SystemKeyAccessor(Configuration configuration, FileSystem fs) throws IOException {
+ super(configuration);
+ this.systemKeyDir = CommonFSUtils.getSystemKeyDir(configuration);
+ this.fs = fs;
+ }
+
+ /**
+ * Return both the latest system key file and all system key files.
+ * @return a pair of the latest system key file and all system key files
+ * @throws IOException if there is an error getting the latest system key file or no cluster key
+ * is initialized yet.
+ */
+ public Pair<Path, List<Path>> getLatestSystemKeyFile() throws IOException {
+ assertKeyManagementEnabled();
+ List<Path> allClusterKeyFiles = getAllSystemKeyFiles();
+ if (allClusterKeyFiles.isEmpty()) {
+ throw new IOException("No cluster key initialized yet");
+ }
+ int currentMaxSeqNum = SystemKeyAccessor.extractKeySequence(allClusterKeyFiles.get(0));
+ return new Pair<>(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + currentMaxSeqNum),
+ allClusterKeyFiles);
+ }
+
+ /**
+ * Return all available cluster key files, ordered from latest to oldest. If no cluster key
+ * files are available, an empty list is returned. Key management must be enabled.
+ * @return a list of all available cluster key files
+ * @throws IOException if there is an error getting the cluster key files
+ */
+ public List<Path> getAllSystemKeyFiles() throws IOException {
+ assertKeyManagementEnabled();
+ LOG.info("Getting all system key files from: {} matching prefix: {}", systemKeyDir,
+ SYSTEM_KEY_FILE_PREFIX + "*");
+ Map<Integer, Path> clusterKeys = new TreeMap<>(Comparator.reverseOrder());
+ for (FileStatus st : fs.globStatus(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))) {
+ Path keyPath = st.getPath();
+ int seqNum = extractSystemKeySeqNum(keyPath);
+ clusterKeys.put(seqNum, keyPath);
+ }
+ return new ArrayList<>(clusterKeys.values());
+ }
+
+ public ManagedKeyData loadSystemKey(Path keyPath) throws IOException {
+ ManagedKeyProvider provider = getKeyProvider();
+ ManagedKeyData keyData = provider.unwrapKey(loadKeyMetadata(keyPath), null);
+ if (keyData == null) {
+ throw new RuntimeException("Failed to load system key from: " + keyPath);
+ }
+ return keyData;
+ }
+
+ @InterfaceAudience.Private
+ public static int extractSystemKeySeqNum(Path keyPath) throws IOException {
+ if (keyPath.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
+ try {
+ return Integer.parseInt(keyPath.getName().substring(SYSTEM_KEY_FILE_PREFIX.length()));
+ } catch (NumberFormatException e) {
+ LOG.error("Invalid file name for a cluster key: {}", keyPath, e);
+ }
+ }
+ throw new IOException("Couldn't parse key file name: " + keyPath.getName());
+ }
+
+ /**
+ * Extract the key sequence number from the cluster key file name.
+ * @param clusterKeyFile the path to the cluster key file
+ * @return The sequence or {@code -1} if not a valid sequence file.
+ * @throws IOException if the file name is not a valid sequence file
+ */
+ @InterfaceAudience.Private
+ public static int extractKeySequence(Path clusterKeyFile) throws IOException {
+ int keySeq = -1;
+ if (clusterKeyFile.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
+ String seqStr = clusterKeyFile.getName().substring(SYSTEM_KEY_FILE_PREFIX.length());
+ if (!seqStr.isEmpty()) {
+ try {
+ keySeq = Integer.parseInt(seqStr);
+ } catch (NumberFormatException e) {
+ throw new IOException("Invalid file name for a cluster key: " + clusterKeyFile, e);
+ }
+ }
+ }
+ return keySeq;
+ }
+
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ try (FSDataInputStream fin = fs.open(keyPath)) {
+ return fin.readUTF();
+ }
+ }
+}
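The file-naming contract assumed throughout is SYSTEM_KEY_FILE_PREFIX immediately followed by a decimal sequence number, so the latest key is simply the highest sequence. A quick sketch of the round trip (the prefix literal here is hypothetical; the real value is HConstants.SYSTEM_KEY_FILE_PREFIX):

    String prefix = "system_key."; // hypothetical stand-in for SYSTEM_KEY_FILE_PREFIX
    String name = prefix + 7;      // file name written by the master, e.g. "system_key.7"
    int seq = Integer.parseInt(name.substring(prefix.length())); // parses back to 7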
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
new file mode 100644
index 000000000000..b01af650d764
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings("checkstyle:FinalClass") // as otherwise it breaks mocking.
+@InterfaceAudience.Private
+public class SystemKeyCache {
+ private static final Logger LOG = LoggerFactory.getLogger(SystemKeyCache.class);
+
+ private final ManagedKeyData latestSystemKey;
+ private final Map<Long, ManagedKeyData> systemKeys;
+
+ /**
+ * Create a SystemKeyCache from the specified configuration and file system.
+ * @param configuration the configuration to use
+ * @param fs the file system to use
+ * @return the cache or {@code null} if no keys are found.
+ * @throws IOException if there is an error loading the system keys
+ */
+ public static SystemKeyCache createCache(Configuration configuration, FileSystem fs)
+ throws IOException {
+ SystemKeyAccessor accessor = new SystemKeyAccessor(configuration, fs);
+ return createCache(accessor);
+ }
+
+ /**
+ * Construct the System Key cache from the specified accessor.
+ * @param accessor the accessor to use to load the system keys
+ * @return the cache or {@code null} if no keys are found.
+ * @throws IOException if there is an error loading the system keys
+ */
+ public static SystemKeyCache createCache(SystemKeyAccessor accessor) throws IOException {
+ List<Path> allSystemKeyFiles = accessor.getAllSystemKeyFiles();
+ if (allSystemKeyFiles.isEmpty()) {
+ LOG.warn("No system key files found, skipping cache creation");
+ return null;
+ }
+ ManagedKeyData latestSystemKey = null;
+ Map<Long, ManagedKeyData> systemKeys = new TreeMap<>();
+ for (Path keyPath : allSystemKeyFiles) {
+ ManagedKeyData keyData = accessor.loadSystemKey(keyPath);
+ LOG.info(
+ "Loaded system key with (custodian: {}, namespace: {}), checksum: {} and metadata hash: {} "
+ + " from file: {}",
+ keyData.getKeyCustodianEncoded(), keyData.getKeyNamespace(), keyData.getKeyChecksum(),
+ keyData.getKeyMetadataHashEncoded(), keyPath);
+ if (latestSystemKey == null) {
+ latestSystemKey = keyData;
+ }
+ systemKeys.put(keyData.getKeyChecksum(), keyData);
+ }
+ return new SystemKeyCache(systemKeys, latestSystemKey);
+ }
+
+ private SystemKeyCache(Map<Long, ManagedKeyData> systemKeys, ManagedKeyData latestSystemKey) {
+ this.systemKeys = systemKeys;
+ this.latestSystemKey = latestSystemKey;
+ }
+
+ public ManagedKeyData getLatestSystemKey() {
+ return latestSystemKey;
+ }
+
+ public ManagedKeyData getSystemKeyByChecksum(long checksum) {
+ return systemKeys.get(checksum);
+ }
+}
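Usage is intentionally narrow: build the cache once at startup, then resolve system keys by checksum when unwrapping DEKs (a sketch; createCache returns null when no key files exist yet):

    SystemKeyCache cache = SystemKeyCache.createCache(conf, fs);
    if (cache != null) {
      ManagedKeyData latest = cache.getLatestSystemKey(); // used to wrap new DEKs
      ManagedKeyData byChecksum = cache.getSystemKeyByChecksum(latest.getKeyChecksum());
    }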
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f8abca44e4c5..4c9d3b30fc6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -119,9 +119,11 @@
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hadoop.hbase.http.InfoServer;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -246,6 +248,7 @@
import org.apache.hadoop.hbase.rsgroup.RSGroupUtil;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.SecurityConstants;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.TraceUtil;
@@ -357,6 +360,7 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements MasterServices
// file system manager for the master FS operations
private MasterFileSystem fileSystemManager;
private MasterWalManager walManager;
+ private SystemKeyManager systemKeyManager;
// manager to manage procedure-based WAL splitting, can be null if current
// is zk-based WAL splitting. SplitWALManager will replace SplitLogManager
@@ -994,6 +998,10 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedException {
ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
this.clusterId = clusterId.toString();
+ systemKeyManager = new SystemKeyManager(this);
+ systemKeyManager.ensureSystemKeyInitialized();
+ buildSystemKeyCache();
+
// Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
// hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
// hbase.write.hbck1.lock.file to false.
@@ -1155,13 +1163,22 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedException {
return;
}
+ this.assignmentManager.joinCluster();
+
+ // If key management is enabled, wait for keymeta table regions to be assigned and online,
+ // which includes creating the table the very first time.
+ // This is to ensure that the encrypted tables can successfully initialize Encryption.Context as
+ // part of the store opening process when processOfflineRegions is called.
+ // Without this, we can end up with a race condition where a user store is opened before the
+ // keymeta table regions are online, which would cause the store opening to fail.
+ initKeymetaIfEnabled();
+
TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
final ColumnFamilyDescriptor tableFamilyDesc =
metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
final ColumnFamilyDescriptor replBarrierFamilyDesc =
metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY);
- this.assignmentManager.joinCluster();
// The below depends on hbase:meta being online.
this.assignmentManager.processOfflineRegions();
// this must be called after the above processOfflineRegions to prevent race
@@ -1422,6 +1439,10 @@ private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor)
.setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
.setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build();
long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false);
+ waitForProcedureToComplete(pid, "Failed to add table and rep_barrier CFs to meta");
+ }
+
+ private void waitForProcedureToComplete(long pid, String errorMessage) throws IOException {
int tries = 30;
while (
!(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning()
@@ -1440,8 +1461,8 @@ private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor)
} else {
Procedure<?> result = getMasterProcedureExecutor().getResult(pid);
if (result != null && result.isFailed()) {
- throw new IOException("Failed to add table and rep_barrier CFs to meta. "
- + MasterProcedureUtil.unwrapRemoteIOException(result));
+ throw new IOException(
+ errorMessage + ". " + MasterProcedureUtil.unwrapRemoteIOException(result));
}
}
}
@@ -1519,6 +1540,80 @@ private boolean waitForNamespaceOnline() throws IOException {
return true;
}
+ /**
+ * Creates the keymeta table and waits for all its regions to be online.
+ */
+ private void initKeymetaIfEnabled() throws IOException {
+ if (!SecurityUtil.isKeyManagementEnabled(conf)) {
+ return;
+ }
+
+ String keymetaTableName =
+ KeymetaTableAccessor.KEY_META_TABLE_NAME.getNameWithNamespaceInclAsString();
+ if (!getTableDescriptors().exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)) {
+ LOG.info("initKeymetaIfEnabled: {} table not found. Creating.", keymetaTableName);
+ long keymetaTableProcId =
+ createSystemTable(KeymetaTableAccessor.TABLE_DESCRIPTOR_BUILDER.build(), true);
+
+ LOG.info("initKeymetaIfEnabled: Waiting for {} table creation procedure {} to complete",
+ keymetaTableName, keymetaTableProcId);
+ waitForProcedureToComplete(keymetaTableProcId,
+ "Failed to create keymeta table and add to meta");
+ }
+
+ List<RegionInfo> ris = this.assignmentManager.getRegionStates()
+ .getRegionsOfTable(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ if (ris.isEmpty()) {
+ throw new RuntimeException(
+ "initKeymetaIfEnabled: No " + keymetaTableName + " table regions found");
+ }
+
+ // First, create assignment procedures for all keymeta regions
+ List<TransitRegionStateProcedure> procs = new ArrayList<>();
+ for (RegionInfo ri : ris) {
+ RegionStateNode regionNode =
+ assignmentManager.getRegionStates().getOrCreateRegionStateNode(ri);
+ regionNode.lock();
+ try {
+ // Only create if region is in CLOSED or OFFLINE state and no procedure is already attached.
+ // The server-online check is really needed only for the mini cluster, where outdated ONLINE
+ // entries returned after a cluster restart can point to the old RS.
+ if (
+ (regionNode.isInState(RegionState.State.CLOSED, RegionState.State.OFFLINE)
+ || !this.serverManager.isServerOnline(regionNode.getRegionLocation()))
+ && regionNode.getProcedure() == null
+ ) {
+ TransitRegionStateProcedure proc = TransitRegionStateProcedure
+ .assign(getMasterProcedureExecutor().getEnvironment(), ri, null);
+ proc.setCriticalSystemTable(true);
+ regionNode.setProcedure(proc);
+ procs.add(proc);
+ }
+ } finally {
+ regionNode.unlock();
+ }
+ }
+ // Then, trigger assignment for all keymeta regions
+ if (!procs.isEmpty()) {
+ LOG.info("initKeymetaIfEnabled: Submitting {} assignment procedures for {} table regions",
+ procs.size(), keymetaTableName);
+ getMasterProcedureExecutor()
+ .submitProcedures(procs.toArray(new TransitRegionStateProcedure[procs.size()]));
+ }
+
+ // Then wait for all regions to come online
+ LOG.info("initKeymetaIfEnabled: Checking/Waiting for {} table {} regions to be online",
+ keymetaTableName, ris.size());
+ for (RegionInfo ri : ris) {
+ if (!isRegionOnline(ri)) {
+ throw new RuntimeException(keymetaTableName + " table region " + ri.getRegionNameAsString()
+ + " could not be brought online");
+ }
+ }
+ LOG.info("initKeymetaIfEnabled: All {} table regions are online", keymetaTableName);
+ }
+
/**
* Adds the {@code MasterQuotasObserver} to the list of configured Master observers to
* automatically remove quotas for a table when that table is deleted.
@@ -1630,6 +1725,17 @@ public MasterWalManager getMasterWalManager() {
return this.walManager;
}
+ @Override
+ public boolean rotateSystemKeyIfChanged() throws IOException {
+ ManagedKeyData newKey = this.systemKeyManager.rotateSystemKeyIfChanged();
+ if (newKey != null) {
+ this.systemKeyCache = null;
+ buildSystemKeyCache();
+ return true;
+ }
+ return false;
+ }
+
@Override
public SplitWALManager getSplitWALManager() {
return splitWALManager;
@@ -2506,6 +2612,11 @@ protected String getDescription() {
@Override
public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
+ return createSystemTable(tableDescriptor, false);
+ }
+
+ private long createSystemTable(final TableDescriptor tableDescriptor, final boolean isCritical)
+ throws IOException {
if (isStopped()) {
throw new MasterNotRunningException();
}
@@ -2522,10 +2633,10 @@ public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
// This special create table is called locally to master. Therefore, no RPC means no need
// to use nonce to detect duplicated RPC call.
- long procId = this.procedureExecutor.submitProcedure(
- new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));
-
- return procId;
+ CreateTableProcedure proc =
+ new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions);
+ proc.setCriticalSystemTable(isCritical);
+ return this.procedureExecutor.submitProcedure(proc);
}
private void startActiveMasterManager(int infoPort) throws KeeperException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 5a43cd98feb9..0ffbfd15c41d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -66,6 +66,7 @@ public class MasterFileSystem {
private final FileSystem walFs;
// root log directory on the FS
private final Path rootdir;
+ private final Path systemKeyDir;
// hbase temp directory used for table construction and deletion
private final Path tempdir;
// root hbase directory on the FS
@@ -96,6 +97,7 @@ public MasterFileSystem(Configuration conf) throws IOException {
// default localfs. Presumption is that rootdir is fully-qualified before
// we get to here with appropriate fs scheme.
this.rootdir = CommonFSUtils.getRootDir(conf);
+ this.systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
// Cover both bases, the old way of setting default fs and the new.
// We're supposed to run on 0.20 and 0.21 anyways.
@@ -134,6 +136,7 @@ private void createInitialFileSystemLayout() throws IOException {
HConstants.CORRUPT_DIR_NAME, ReplicationUtils.REMOTE_WAL_DIR_NAME };
// check if the root directory exists
checkRootDir(this.rootdir, conf, this.fs);
+ checkSubDir(this.systemKeyDir, HBASE_DIR_PERMS);
// Check the directories under rootdir.
checkTempDir(this.tempdir, conf, this.fs);
@@ -158,6 +161,7 @@ private void createInitialFileSystemLayout() throws IOException {
if (isSecurityEnabled) {
fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
+ fs.setPermission(systemKeyDir, secureRootFilePerms);
}
FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
if (
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index e9e0f970ef8d..dbb56899b91f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -193,6 +193,9 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BooleanMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
@@ -3621,6 +3624,24 @@ public GetCachedFilesListResponse getCachedFilesList(RpcController controller,
throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
}
+ @Override
+ public EmptyMsg refreshSystemKeyCache(RpcController controller, EmptyMsg request)
+ throws ServiceException {
+ throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
+ }
+
+ @Override
+ public BooleanMsg ejectManagedKeyDataCacheEntry(RpcController controller,
+ ManagedKeyEntryRequest request) throws ServiceException {
+ throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
+ }
+
+ @Override
+ public EmptyMsg clearManagedKeyDataCache(RpcController controller, EmptyMsg request)
+ throws ServiceException {
+ throw new ServiceException(new DoNotRetryIOException("Unsupported method on master"));
+ }
+
@Override
public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller,
GetLiveRegionServersRequest request) throws ServiceException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 0573b1a75628..745b962860bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
@@ -67,7 +68,7 @@
* adding API. Changes cause ripples through the code base.
*/
@InterfaceAudience.Private
-public interface MasterServices extends Server {
+public interface MasterServices extends Server, KeyManagementService {
/** Returns the underlying snapshot manager */
SnapshotManager getSnapshotManager();
@@ -86,6 +87,9 @@ public interface MasterServices extends Server {
/** Returns Master's WALs {@link MasterWalManager} utility class. */
MasterWalManager getMasterWalManager();
+ /** Rotates the system key if changed, returns true if a new key was detected and rotated */
+ boolean rotateSystemKeyIfChanged() throws IOException;
+
/** Returns Master's {@link ServerManager} instance. */
ServerManager getServerManager();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
new file mode 100644
index 000000000000..de0e37dde275
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.UUID;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class SystemKeyManager extends SystemKeyAccessor {
+ private static final Logger LOG = LoggerFactory.getLogger(SystemKeyManager.class);
+
+ private final MasterServices master;
+
+ public SystemKeyManager(MasterServices master) throws IOException {
+ super(master);
+ this.master = master;
+ }
+
+ public void ensureSystemKeyInitialized() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ return;
+ }
+ List<Path> clusterKeys = getAllSystemKeyFiles();
+ if (clusterKeys.isEmpty()) {
+ LOG.info("Initializing System Key for the first time");
+ // Double check for cluster key as another HMaster might have succeeded.
+ if (rotateSystemKey(null, clusterKeys) == null && getAllSystemKeyFiles().isEmpty()) {
+ throw new RuntimeException("Failed to generate or save System Key");
+ }
+ } else if (rotateSystemKeyIfChanged() != null) {
+ LOG.info("System key has been rotated");
+ } else {
+ LOG.info("System key is already initialized and unchanged");
+ }
+ }
+
+ public ManagedKeyData rotateSystemKeyIfChanged() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ return null;
+ }
+ Pair<Path, List<Path>> latestFileResult = getLatestSystemKeyFile();
+ Path latestFile = latestFileResult.getFirst();
+ String latestKeyMetadata = loadKeyMetadata(latestFile);
+ return rotateSystemKey(latestKeyMetadata, latestFileResult.getSecond());
+ }
+
+ private ManagedKeyData rotateSystemKey(String currentKeyMetadata, List<Path> allSystemKeyFiles)
+ throws IOException {
+ ManagedKeyProvider provider = getKeyProvider();
+ ManagedKeyData clusterKey =
+ provider.getSystemKey(master.getMasterFileSystem().getClusterId().toString().getBytes());
+ if (clusterKey == null) {
+ throw new IOException("Failed to get system key for cluster id: "
+ + master.getMasterFileSystem().getClusterId().toString());
+ }
+ if (clusterKey.getKeyState() != ManagedKeyState.ACTIVE) {
+ throw new IOException("System key is expected to be ACTIVE but it is: "
+ + clusterKey.getKeyState() + " for metadata: " + clusterKey.getKeyMetadata());
+ }
+ if (clusterKey.getKeyMetadata() == null) {
+ throw new IOException("System key is expected to have metadata but it is null");
+ }
+ if (
+ !clusterKey.getKeyMetadata().equals(currentKeyMetadata)
+ && saveLatestSystemKey(clusterKey.getKeyMetadata(), allSystemKeyFiles)
+ ) {
+ return clusterKey;
+ }
+ return null;
+ }
+
+ private boolean saveLatestSystemKey(String keyMetadata, List<Path> allSystemKeyFiles)
+ throws IOException {
+ int nextSystemKeySeq = (allSystemKeyFiles.isEmpty()
+ ? -1
+ : SystemKeyAccessor.extractKeySequence(allSystemKeyFiles.get(0))) + 1;
+ LOG.info("Trying to save a new cluster key at seq: {}", nextSystemKeySeq);
+ MasterFileSystem masterFS = master.getMasterFileSystem();
+ Path nextSystemKeyPath = new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + nextSystemKeySeq);
+ Path tempSystemKeyFile =
+ new Path(masterFS.getTempDir(), nextSystemKeyPath.getName() + UUID.randomUUID());
+ try {
+ try (FSDataOutputStream fsDataOutputStream =
+ masterFS.getFileSystem().create(tempSystemKeyFile)) {
+ fsDataOutputStream.writeUTF(keyMetadata);
+ }
+ // Rename only after the stream is closed, so a partially written file can never win the race.
+ boolean succeeded = masterFS.getFileSystem().rename(tempSystemKeyFile, nextSystemKeyPath);
+ if (succeeded) {
+ LOG.info("System key save succeeded for seq: {}", nextSystemKeySeq);
+ } else {
+ LOG.error("System key save failed for seq: {}", nextSystemKeySeq);
+ }
+ return succeeded;
+ } finally {
+ masterFS.getFileSystem().delete(tempSystemKeyFile, false);
+ }
+ }
+}
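saveLatestSystemKey relies on the classic write-to-temp-then-rename pattern: concurrent masters race on the atomic rename rather than on partial writes, so at most one writer claims a given sequence number. The same idea distilled against a raw Hadoop FileSystem (paths and names hypothetical):

    Path tmp = new Path(tempDir, "system_key.7." + UUID.randomUUID());
    Path dst = new Path(systemKeyDir, "system_key.7");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeUTF(keyMetadata); // write and close the temp file first
    }
    boolean won = fs.rename(tmp, dst); // only one concurrent writer can win
    if (!won) {
      fs.delete(tmp, false); // lost the race; another master already saved it
    }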
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 8c37385757a7..a964dd3b7046 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1285,7 +1285,7 @@ public ReportRegionStateTransitionResponse reportRegionStateTransition(
private void updateRegionTransition(ServerStateNode serverNode, TransitionCode state,
RegionInfo regionInfo, long seqId, long procId) throws IOException {
- checkMetaLoaded(regionInfo);
+ checkMetaLoaded(regionInfo, procId);
RegionStateNode regionNode = regionStates.getRegionStateNode(regionInfo);
if (regionNode == null) {
@@ -1337,7 +1337,7 @@ private boolean reportTransition(RegionStateNode regionNode, ServerStateNode ser
private void updateRegionSplitTransition(final ServerStateNode serverNode,
final TransitionCode state, final RegionInfo parent, final RegionInfo hriA,
final RegionInfo hriB) throws IOException {
- checkMetaLoaded(parent);
+ checkMetaLoaded(parent, Procedure.NO_PROC_ID);
if (state != TransitionCode.READY_TO_SPLIT) {
throw new UnexpectedStateException(
@@ -1391,7 +1391,7 @@ private void updateRegionSplitTransition(final ServerStateNode serverNode,
private void updateRegionMergeTransition(final ServerStateNode serverNode,
final TransitionCode state, final RegionInfo merged, final RegionInfo hriA,
final RegionInfo hriB) throws IOException {
- checkMetaLoaded(merged);
+ checkMetaLoaded(merged, Procedure.NO_PROC_ID);
if (state != TransitionCode.READY_TO_MERGE) {
throw new UnexpectedStateException(
@@ -1911,13 +1911,27 @@ private void loadMeta() throws IOException {
* Used to check if the meta loading is done.
*
* if not we throw PleaseHoldException since we are rebuilding the RegionStates
- * @param hri region to check if it is already rebuild
+ * @param hri region to check if it is already rebuilt
+ * @param procId the procedure id for this region operation, or NO_PROC_ID if not available
* @throws PleaseHoldException if meta has not been loaded yet
*/
- private void checkMetaLoaded(RegionInfo hri) throws PleaseHoldException {
+ private void checkMetaLoaded(RegionInfo hri, long procId) throws PleaseHoldException {
if (!isRunning()) {
throw new PleaseHoldException("AssignmentManager not running");
}
+
+ // Check if the procedure is for a critical system table
+ // Critical system tables can proceed even if meta is not loaded yet
+ // procId is currently made available only on the code path that can execute during cluster
+ // boot-up. If additional code paths start executing during boot-up, procId will need to be
+ // made available on those paths as well.
+ if (procId != Procedure.NO_PROC_ID) {
+ Procedure<?> proc = master.getMasterProcedureExecutor().getProcedure(procId);
+ if (proc != null && proc.isCriticalSystemTable()) {
+ return;
+ }
+ }
+
boolean meta = isMetaRegion(hri);
boolean metaLoaded = isMetaLoaded();
if (!meta && !metaLoaded) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java
index cb3b91ca0e20..fe8c9bfdd7c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.java
@@ -155,6 +155,9 @@ public TableName getTableName() {
@Override
protected boolean waitInitialized(MasterProcedureEnv env) {
+ if (isCriticalSystemTable()) {
+ return false;
+ }
if (TableName.isMetaTableName(getTableName())) {
return false;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
index 12ddb2559367..5f02fea9c463 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
@@ -206,6 +206,9 @@ public TableOperationType getTableOperationType() {
@Override
protected boolean waitInitialized(MasterProcedureEnv env) {
+ if (isCriticalSystemTable()) {
+ return false;
+ }
if (TableName.isMetaTableName(getTableName())) {
return false;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 423297f667d3..6f7ab6b82104 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -264,6 +264,9 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws
@Override
protected boolean waitInitialized(MasterProcedureEnv env) {
+ if (isCriticalSystemTable()) {
+ return false;
+ }
if (getTableName().isSystemTable()) {
// Creating system table is part of the initialization, so only wait for meta loaded instead
// of waiting for master fully initialized.
@@ -360,8 +363,7 @@ public List<RegionInfo> createHdfsRegions(final MasterProcedureEnv env,
throws IOException {
RegionInfo[] regions =
newRegions != null ? newRegions.toArray(new RegionInfo[newRegions.size()]) : null;
- return ModifyRegionUtils.createRegions(env.getMasterConfiguration(), tableRootDir,
- tableDescriptor, regions, null);
+ return ModifyRegionUtils.createRegions(env, tableRootDir, tableDescriptor, regions, null);
}
});
}
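Taken together, the waitInitialized overrides in this patch form a layered gate: critical system tables never wait, meta never waits, other system tables wait only for meta to load, and everything else waits for full master initialization. A condensed illustration follows; the boolean fields stand in for the real predicates.

```java
/** Condensed sketch of the layered waitInitialized gate; fields are stand-ins. */
class WaitGateSketch {
  boolean criticalSystemTable; // e.g. tables needed for key management at boot
  boolean metaTable;
  boolean systemTable;
  boolean metaLoaded;
  boolean masterInitialized;

  /** Returns true when the procedure must keep waiting. */
  boolean waitInitialized() {
    if (criticalSystemTable) {
      return false; // bootstrap-critical: never wait
    }
    if (metaTable) {
      return false; // meta cannot wait on itself
    }
    if (systemTable) {
      return !metaLoaded; // system tables only need meta
    }
    return !masterInitialized; // user tables wait for full init
  }
}
```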
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index 8b4901e90e85..2d54eaf6c58c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -75,10 +75,10 @@ public TableOperationType getTableOperationType() {
return TableOperationType.CREATE;
}
- private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf)
+ private static TableDescriptor writeFsLayout(Path rootDir, MasterProcedureEnv env)
throws IOException {
LOG.info("BOOTSTRAP: creating hbase:meta region");
- FileSystem fs = rootDir.getFileSystem(conf);
+ FileSystem fs = rootDir.getFileSystem(env.getMasterConfiguration());
Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
if (fs.exists(tableDir) && !deleteMetaTableDirectoryIfPartial(fs, tableDir)) {
LOG.warn("Can not delete partial created meta table, continue...");
@@ -87,10 +87,11 @@ private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf)
// created here in bootstrap and it'll need to be cleaned up. Better to
// not make it in first place. Turn off block caching for bootstrap.
// Enable after.
- TableDescriptor metaDescriptor =
- FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir);
+ TableDescriptor metaDescriptor = FSTableDescriptors
+ .tryUpdateAndGetMetaTableDescriptor(env.getMasterConfiguration(), fs, rootDir);
HRegion
- .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null)
+ .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, env.getMasterConfiguration(),
+ metaDescriptor, null, env.getMasterServices().getKeyManagementService())
.close();
return metaDescriptor;
}
@@ -104,7 +105,7 @@ protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state)
case INIT_META_WRITE_FS_LAYOUT:
Configuration conf = env.getMasterConfiguration();
Path rootDir = CommonFSUtils.getRootDir(conf);
- TableDescriptor td = writeFsLayout(rootDir, conf);
+ TableDescriptor td = writeFsLayout(rootDir, env);
env.getMasterServices().getTableDescriptors().update(td, true);
setNextState(InitMetaState.INIT_META_ASSIGN_META);
return Flow.HAS_MORE_STATE;
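For orientation, executeFromState here drives a small state machine: write the meta filesystem layout, persist the descriptor, then advance to assignment. A toy reduction of that control flow is sketched below; the state names and step bodies are placeholders, not the actual procedure states.

```java
/** Toy reduction of the InitMeta state machine; states and bodies are placeholders. */
class InitMetaSketch {
  enum State { WRITE_FS_LAYOUT, ASSIGN_META, DONE }

  private State state = State.WRITE_FS_LAYOUT;

  /** Returns true while there is more state to execute. */
  boolean step() {
    switch (state) {
      case WRITE_FS_LAYOUT:
        // create the table layout on disk and persist the descriptor, then advance
        state = State.ASSIGN_META;
        return true;
      case ASSIGN_META:
        // hand the region to the assignment machinery, then finish
        state = State.DONE;
        return true;
      default:
        return false;
    }
  }
}
```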
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
index 97447e37b7c4..0539fb6250a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
@@ -29,7 +29,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ConnectionUtils;
@@ -44,6 +43,7 @@
import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.log.HBaseMarkers;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -114,7 +114,7 @@ public final class MasterRegion {
private static final int REGION_ID = 1;
- private final Server server;
+ private final MasterServices server;
private final WALFactory walFactory;
@@ -128,7 +128,7 @@ public final class MasterRegion {
private final long regionUpdateRetryPauseTime;
- private MasterRegion(Server server, HRegion region, WALFactory walFactory,
+ private MasterRegion(MasterServices server, HRegion region, WALFactory walFactory,
MasterRegionFlusherAndCompactor flusherAndCompactor, MasterRegionWALRoller walRoller) {
this.server = server;
this.region = region;
@@ -301,14 +301,15 @@ private static WAL createWAL(WALFactory walFactory, MasterRegionWALRoller walRol
private static HRegion bootstrap(Configuration conf, TableDescriptor td, FileSystem fs,
Path rootDir, FileSystem walFs, Path walRootDir, WALFactory walFactory,
- MasterRegionWALRoller walRoller, String serverName, boolean touchInitializingFlag)
+ MasterRegionWALRoller walRoller, MasterServices server, boolean touchInitializingFlag)
throws IOException {
TableName tn = td.getTableName();
RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tn).setRegionId(REGION_ID).build();
Path tableDir = CommonFSUtils.getTableDir(rootDir, tn);
// persist table descriptor
FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, td, true);
- HRegion.createHRegion(conf, regionInfo, fs, tableDir, td).close();
+ HRegion.createHRegion(conf, regionInfo, fs, tableDir, td, server.getKeyManagementService())
+ .close();
Path initializedFlag = new Path(tableDir, INITIALIZED_FLAG);
if (!fs.mkdirs(initializedFlag)) {
throw new IOException("Can not touch initialized flag: " + initializedFlag);
@@ -317,8 +318,10 @@ private static HRegion bootstrap(Configuration conf, TableDescriptor td, FileSys
if (!fs.delete(initializingFlag, true)) {
LOG.warn("failed to clean up initializing flag: " + initializingFlag);
}
- WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, regionInfo);
- return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null);
+ WAL wal = createWAL(walFactory, walRoller, server.getServerName().toString(), walFs, walRootDir,
+ regionInfo);
+ return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null,
+ server.getKeyManagementService());
}
private static RegionInfo loadRegionInfo(FileSystem fs, Path tableDir) throws IOException {
@@ -330,7 +333,7 @@ private static RegionInfo loadRegionInfo(FileSystem fs, Path tableDir) throws IO
private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo regionInfo,
FileSystem fs, Path rootDir, FileSystem walFs, Path walRootDir, WALFactory walFactory,
- MasterRegionWALRoller walRoller, String serverName) throws IOException {
+ MasterRegionWALRoller walRoller, MasterServices server) throws IOException {
Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName());
Path walRegionDir = FSUtils.getRegionDirFromRootDir(walRootDir, regionInfo);
Path replayEditsDir = new Path(walRegionDir, REPLAY_EDITS_DIR);
@@ -346,7 +349,8 @@ private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo r
// to always exist in normal situations, but we should guard against users changing the
// filesystem outside of HBase's line of sight.
if (walFs.exists(walsDir)) {
- replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, serverName, replayEditsDir);
+ replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, server.getServerName().toString(),
+ replayEditsDir);
} else {
LOG.error(
"UNEXPECTED: WAL directory for MasterRegion is missing." + " {} is unexpectedly missing.",
@@ -354,13 +358,15 @@ private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo r
}
// Create a new WAL
- WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, regionInfo);
+ WAL wal = createWAL(walFactory, walRoller, server.getServerName().toString(), walFs, walRootDir,
+ regionInfo);
conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR,
replayEditsDir.makeQualified(walFs.getUri(), walFs.getWorkingDirectory()).toString());
// we do not do WAL splitting here so it is possible to have uncleanly closed WAL files, so we
// need to ignore EOFException.
conf.setBoolean(HRegion.RECOVERED_EDITS_IGNORE_EOF, true);
- return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null);
+ return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null,
+ server);
}
private static void replayWALs(Configuration conf, FileSystem walFs, Path walRootDir,
@@ -437,7 +443,7 @@ private static void tryMigrate(Configuration conf, FileSystem fs, Path tableDir,
public static MasterRegion create(MasterRegionParams params) throws IOException {
TableDescriptor td = params.tableDescriptor();
LOG.info("Create or load local region for table " + td);
- Server server = params.server();
+ MasterServices server = params.server();
Configuration baseConf = server.getConfiguration();
FileSystem fs = CommonFSUtils.getRootDirFileSystem(baseConf);
FileSystem walFs = CommonFSUtils.getWALFileSystem(baseConf);
@@ -476,8 +482,8 @@ public static MasterRegion create(MasterRegionParams params) throws IOException
if (!fs.mkdirs(initializedFlag)) {
throw new IOException("Can not touch initialized flag");
}
- region = bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
- server.getServerName().toString(), true);
+ region =
+ bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller, server, true);
} else {
if (!fs.exists(initializedFlag)) {
if (!fs.exists(initializingFlag)) {
@@ -495,7 +501,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException
RegionInfo regionInfo = loadRegionInfo(fs, tableDir);
tryMigrate(conf, fs, tableDir, regionInfo, oldTd, td);
region = open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
- server.getServerName().toString());
+ server);
} else {
// delete all contents besides the initializing flag, here we can make sure tableDir
// exists(unless someone delete it manually...), so we do not do null check here.
@@ -505,7 +511,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException
}
}
region = bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
- server.getServerName().toString(), false);
+ server, false);
}
} else {
if (fs.exists(initializingFlag) && !fs.delete(initializingFlag, true)) {
@@ -515,8 +521,8 @@ public static MasterRegion create(MasterRegionParams params) throws IOException
TableDescriptor oldTd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
RegionInfo regionInfo = loadRegionInfo(fs, tableDir);
tryMigrate(conf, fs, tableDir, regionInfo, oldTd, td);
- region = open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
- server.getServerName().toString());
+ region =
+ open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller, server);
}
}
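The create flow above chooses between bootstrapping and opening the local region using two marker flags: an in-flight "initializing" flag and a terminal "initialized" flag. A JDK-only sketch of that decision logic follows; the flag names and return values are illustrative.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/** Sketch of the flag-based bootstrap/open decision; names are illustrative. */
class LocalRegionBootstrapSketch {
  static String decide(Path tableDir) throws IOException {
    Path initialized = tableDir.resolve(".initialized");
    Path initializing = tableDir.resolve(".initializing");
    if (!Files.exists(tableDir)) {
      return "bootstrap"; // fresh deploy: set initializing, build, then mark initialized
    }
    if (Files.exists(initialized)) {
      // a prior bootstrap completed; clear any stale in-flight marker and open
      Files.deleteIfExists(initializing);
      return "open";
    }
    if (Files.exists(initializing)) {
      return "re-bootstrap"; // bootstrap crashed mid-way: wipe contents and redo
    }
    return "open-legacy"; // pre-flag layout (e.g. an upgrade), open as-is
  }
}
```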
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
index 71fb76bd0f1b..878f8dc17a1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
@@ -21,12 +21,12 @@
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
@@ -113,7 +113,7 @@ private static TableDescriptor withTrackerConfigs(Configuration conf) {
return tracker.updateWithTrackerConfigs(TableDescriptorBuilder.newBuilder(TABLE_DESC)).build();
}
- public static MasterRegion create(Server server) throws IOException {
+ public static MasterRegion create(MasterServices server) throws IOException {
Configuration conf = server.getConfiguration();
MasterRegionParams params = new MasterRegionParams().server(server)
.regionDirName(MASTER_STORE_DIR).tableDescriptor(withTrackerConfigs(conf));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
index b9065747b669..443bca9f8c97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hbase.master.region;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -27,7 +27,7 @@
@InterfaceAudience.Private
public class MasterRegionParams {
- private Server server;
+ private MasterServices server;
private String regionDirName;
@@ -55,7 +55,7 @@ public class MasterRegionParams {
private Boolean useMetaCellComparator;
- public MasterRegionParams server(Server server) {
+ public MasterRegionParams server(MasterServices server) {
this.server = server;
return this;
}
@@ -125,7 +125,7 @@ public MasterRegionParams useMetaCellComparator(boolean useMetaCellComparator) {
return this;
}
- public Server server() {
+ public MasterServices server() {
return server;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9b7daee0f668..c487e438a568 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -90,6 +90,7 @@
import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
@@ -146,6 +147,9 @@
import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerCall;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -166,6 +170,7 @@
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
@@ -382,6 +387,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
private final Configuration baseConf;
private final int rowLockWaitDuration;
static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
+ private final ManagedKeyDataCache managedKeyDataCache;
+ private final SystemKeyCache systemKeyCache;
private Path regionWalDir;
private FileSystem walFS;
@@ -769,8 +776,36 @@ void sawNoSuchFamily() {
public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd,
final RegionServerServices rsServices) {
+ this(tableDir, wal, fs, confParam, regionInfo, htd, rsServices, null);
+ }
+
+ /**
+ * HRegion constructor. This constructor should only be used for testing and extensions. Instances
+ * of HRegion should be instantiated with the {@link HRegion#createHRegion} or
+ * {@link HRegion#openHRegion} method.
+ * @param tableDir qualified path of directory where region should be located, usually
+ * the table directory.
+ * @param wal The WAL is the outbound log for any updates to the HRegion The wal
+ * file is a logfile from the previous execution that's
+ * custom-computed for this HRegion. The HRegionServer computes and
+ * sorts the appropriate wal info for this HRegion. If there is a
+ * previous wal file (implying that the HRegion has been written-to
+ * before), then read it from the supplied path.
+ * @param fs is the filesystem.
+ * @param confParam is global configuration settings.
+ * @param regionInfo - RegionInfo that describes the region
+ * @param htd the table descriptor
+ * @param rsServices reference to {@link RegionServerServices} or null
+ * @param keyManagementService reference to {@link KeyManagementService} or null
+ * @deprecated Use other constructors.
+ */
+ @Deprecated
+ public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
+ final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd,
+ final RegionServerServices rsServices, final KeyManagementService keyManagementService) {
this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo), wal, confParam, htd,
- rsServices);
+ rsServices, keyManagementService);
}
/**
@@ -789,6 +824,28 @@ public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
*/
public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam,
final TableDescriptor htd, final RegionServerServices rsServices) {
+ this(fs, wal, confParam, htd, rsServices, null);
+ }
+
+ /**
+ * HRegion constructor. This constructor should only be used for testing and extensions. Instances
+ * of HRegion should be instantiated with the {@link HRegion#createHRegion} or
+ * {@link HRegion#openHRegion} method.
+ * @param fs is the filesystem.
+ * @param wal The WAL is the outbound log for any updates to the HRegion The wal
+ * file is a logfile from the previous execution that's
+ * custom-computed for this HRegion. The HRegionServer computes and
+ * sorts the appropriate wal info for this HRegion. If there is a
+ * previous wal file (implying that the HRegion has been written-to
+ * before), then read it from the supplied path.
+ * @param confParam is global configuration settings.
+ * @param htd the table descriptor
+ * @param rsServices reference to {@link RegionServerServices} or null
+ * @param keyManagementService reference to {@link KeyManagementService} or null
+ */
+ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam,
+ final TableDescriptor htd, final RegionServerServices rsServices,
+ KeyManagementService keyManagementService) {
if (htd == null) {
throw new IllegalArgumentException("Need table descriptor");
}
@@ -929,6 +986,17 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co
minBlockSizeBytes = Arrays.stream(this.htableDescriptor.getColumnFamilies())
.mapToInt(ColumnFamilyDescriptor::getBlocksize).min().orElse(HConstants.DEFAULT_BLOCKSIZE);
+
+ if (SecurityUtil.isKeyManagementEnabled(conf)) {
+ if (keyManagementService == null) {
+ keyManagementService = KeyManagementService.createDefault(conf, fs.getFileSystem());
+ }
+ this.managedKeyDataCache = keyManagementService.getManagedKeyDataCache();
+ this.systemKeyCache = keyManagementService.getSystemKeyCache();
+ } else {
+ this.managedKeyDataCache = null;
+ this.systemKeyCache = null;
+ }
}
private void setHTableSpecificConf() {
@@ -2122,6 +2190,14 @@ public BlockCache getBlockCache() {
return this.blockCache;
}
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return this.managedKeyDataCache;
+ }
+
+ public SystemKeyCache getSystemKeyCache() {
+ return this.systemKeyCache;
+ }
+
/**
* Only used for unit test which doesn't start region server.
*/
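The constructor wiring added above materializes the two key caches only when key management is enabled, and falls back to a default service when none was injected. A stripped-down sketch of the pattern, with KeyService and the cache types as stand-ins:

```java
/** Stripped-down sketch of conditional cache wiring; all types are stand-ins. */
class KeyCacheWiring {
  interface KeyService {
    Object managedKeyDataCache();
    Object systemKeyCache();
  }

  final Object managedKeyDataCache;
  final Object systemKeyCache;

  KeyCacheWiring(boolean keyManagementEnabled, KeyService injected, KeyService fallback) {
    if (keyManagementEnabled) {
      KeyService svc = injected != null ? injected : fallback;
      this.managedKeyDataCache = svc.managedKeyDataCache();
      this.systemKeyCache = svc.systemKeyCache();
    } else {
      // null caches signal "key management off" to the read/write paths
      this.managedKeyDataCache = null;
      this.systemKeyCache = null;
    }
  }
}
```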
@@ -7592,37 +7668,60 @@ public String toString() {
}
// Utility methods
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+ public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf,
+ RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) {
+ return newHRegion(tableDir, wal, fs, conf, regionInfo, htd, rsServices, null);
+ }
+
/**
* A utility method to create new instances of HRegion based on the {@link HConstants#REGION_IMPL}
* configuration property.
- * @param tableDir qualified path of directory where region should be located, usually the table
- * directory.
- * @param wal The WAL is the outbound log for any updates to the HRegion The wal file is a
- * logfile from the previous execution that's custom-computed for this HRegion.
- * The HRegionServer computes and sorts the appropriate wal info for this
- * HRegion. If there is a previous file (implying that the HRegion has been
- * written-to before), then read it from the supplied path.
- * @param fs is the filesystem.
- * @param conf is global configuration settings.
- * @param regionInfo - RegionInfo that describes the region is new), then read them from the
- * supplied path.
- * @param htd the table descriptor
+ * @param tableDir qualified path of directory where region should be located, usually
+ * the table directory.
+ * @param wal The WAL is the outbound log for any updates to the HRegion The wal
+ * file is a logfile from the previous execution that's
+ * custom-computed for this HRegion. The HRegionServer computes and
+ * sorts the appropriate wal info for this HRegion. If there is a
+ * previous file (implying that the HRegion has been written-to
+ * before), then read it from the supplied path.
+ * @param fs is the filesystem.
+ * @param conf is global configuration settings.
+ * @param regionInfo - RegionInfo that describes the region
+ * @param htd the table descriptor
+ * @param keyManagementService reference to {@link KeyManagementService} or null
* @return the new instance
*/
public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf,
- RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) {
+ RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices,
+ final KeyManagementService keyManagementService) {
+ List<Class<?>> ctorArgTypes =
+ Arrays.asList(Path.class, WAL.class, FileSystem.class, Configuration.class, RegionInfo.class,
+ TableDescriptor.class, RegionServerServices.class, KeyManagementService.class);
+ List<Object> ctorArgs =
+ Arrays.asList(tableDir, wal, fs, conf, regionInfo, htd, rsServices, keyManagementService);
+
+ try {
+ return createInstance(conf, ctorArgTypes, ctorArgs);
+ } catch (Throwable e) {
+ // Try the old signature for the sake of test code.
+ return createInstance(conf, ctorArgTypes.subList(0, ctorArgTypes.size() - 1),
+ ctorArgs.subList(0, ctorArgs.size() - 1));
+ }
+ }
+
+ private static HRegion createInstance(Configuration conf, List<Class<?>> ctorArgTypes,
+ List<Object> ctorArgs) {
try {
@SuppressWarnings("unchecked")
Class<? extends HRegion> regionClass =
(Class<? extends HRegion>) conf.getClass(HConstants.REGION_IMPL, HRegion.class);
Constructor<? extends HRegion> c =
- regionClass.getConstructor(Path.class, WAL.class, FileSystem.class, Configuration.class,
- RegionInfo.class, TableDescriptor.class, RegionServerServices.class);
-
- return c.newInstance(tableDir, wal, fs, conf, regionInfo, htd, rsServices);
+ regionClass.getConstructor(ctorArgTypes.toArray(new Class<?>[ctorArgTypes.size()]));
+ return c.newInstance(ctorArgs.toArray(new Object[ctorArgs.size()]));
} catch (Throwable e) {
- // todo: what should I throw here?
throw new IllegalStateException("Could not instantiate a region instance.", e);
}
}
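The createInstance helper lets newHRegion try the new eight-argument constructor first and fall back to the old signature, so custom HConstants.REGION_IMPL subclasses compiled against the previous constructor keep working. A self-contained sketch of that fallback as a generic factory (names are illustrative):

```java
import java.lang.reflect.Constructor;
import java.util.List;

/** Generic sketch of "try new signature, fall back to old"; names are illustrative. */
class ReflectiveFallbackFactory {
  static <T> T newInstance(Class<T> impl, List<Class<?>> argTypes, List<Object> args)
    throws ReflectiveOperationException {
    try {
      Constructor<T> c = impl.getConstructor(argTypes.toArray(new Class<?>[0]));
      return c.newInstance(args.toArray());
    } catch (NoSuchMethodException e) {
      // Older subclasses may not declare the trailing parameter; retry without it.
      Constructor<T> c =
        impl.getConstructor(argTypes.subList(0, argTypes.size() - 1).toArray(new Class<?>[0]));
      return c.newInstance(args.subList(0, args.size() - 1).toArray());
    }
  }
}
```

Note that the patch catches Throwable on the first attempt, which also masks errors thrown by the new constructor itself; catching NoSuchMethodException, as in the sketch, would fall back only on a genuine signature mismatch.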
@@ -7635,6 +7734,7 @@ public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configur
* @param initialize - true to initialize the region
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize) throws IOException {
@@ -7650,16 +7750,35 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
* @param rsRpcServices An interface we can request flushes against.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize, RegionServerServices rsRpcServices) throws IOException {
+ return createHRegion(info, rootDir, conf, hTableDescriptor, wal, initialize, rsRpcServices,
+ null);
+ }
+
+ /**
+ * Convenience method creating new HRegions. Used by createTable.
+ * @param info Info for region to create.
+ * @param rootDir Root directory for HBase instance
+ * @param wal shared WAL
+ * @param initialize - true to initialize the region
+ * @param rsRpcServices An interface we can request flushes against.
+ * @param keyManagementService reference to {@link KeyManagementService} or null
+ * @return new HRegion
+ */
+ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
+ final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal,
+ final boolean initialize, RegionServerServices rsRpcServices,
+ final KeyManagementService keyManagementService) throws IOException {
LOG.info("creating " + info + ", tableDescriptor="
+ (hTableDescriptor == null ? "null" : hTableDescriptor) + ", regionDir=" + rootDir);
createRegionDir(conf, info, rootDir);
FileSystem fs = rootDir.getFileSystem(conf);
Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable());
- HRegion region =
- HRegion.newHRegion(tableDir, wal, fs, conf, info, hTableDescriptor, rsRpcServices);
+ HRegion region = HRegion.newHRegion(tableDir, wal, fs, conf, info, hTableDescriptor,
+ rsRpcServices, keyManagementService);
if (initialize) {
region.initialize(null);
}
@@ -7670,11 +7789,13 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
* Create a region under the given table directory.
*/
public static HRegion createHRegion(Configuration conf, RegionInfo regionInfo, FileSystem fs,
- Path tableDir, TableDescriptor tableDesc) throws IOException {
+ Path tableDir, TableDescriptor tableDesc, KeyManagementService keyManagementService)
+ throws IOException {
LOG.info("Creating {}, tableDescriptor={}, under table dir {}", regionInfo, tableDesc,
tableDir);
HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo);
- HRegion region = HRegion.newHRegion(tableDir, null, fs, conf, regionInfo, tableDesc, null);
+ HRegion region = HRegion.newHRegion(tableDir, null, fs, conf, regionInfo, tableDesc, null,
+ keyManagementService);
return region;
}
@@ -7693,7 +7814,14 @@ public static HRegionFileSystem createRegionDir(Configuration configuration, Reg
public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal)
throws IOException {
- return createHRegion(info, rootDir, conf, hTableDescriptor, wal, true);
+ return createHRegion(info, rootDir, conf, hTableDescriptor, wal, null);
+ }
+
+ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
+ final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal,
+ final KeyManagementService keyManagementService) throws IOException {
+ return createHRegion(info, rootDir, conf, hTableDescriptor, wal, true, null,
+ keyManagementService);
}
/**
@@ -7704,6 +7832,7 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
* properly kept up. HRegionStore does this every time it opens a new region.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal,
final Configuration conf) throws IOException {
return openHRegion(info, htd, wal, conf, null, null);
@@ -7725,7 +7854,8 @@ public static HRegion openHRegion(final RegionInfo info, final TableDescriptor h
public static HRegion openHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal,
final Configuration conf, final RegionServerServices rsServices,
final CancelableProgressable reporter) throws IOException {
- return openHRegion(CommonFSUtils.getRootDir(conf), info, htd, wal, conf, rsServices, reporter);
+ return openHRegion(CommonFSUtils.getRootDir(conf), info, htd, wal, conf, rsServices, reporter,
+ rsServices);
}
/**
@@ -7739,9 +7869,10 @@ public static HRegion openHRegion(final RegionInfo info, final TableDescriptor h
* @param conf The Configuration object to use.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(Path rootDir, final RegionInfo info, final TableDescriptor htd,
final WAL wal, final Configuration conf) throws IOException {
- return openHRegion(rootDir, info, htd, wal, conf, null, null);
+ return openHRegion(rootDir, info, htd, wal, conf, null, null, null);
}
/**
@@ -7758,10 +7889,33 @@ public static HRegion openHRegion(Path rootDir, final RegionInfo info, final Tab
* @param reporter An interface we can report progress against.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
final TableDescriptor htd, final WAL wal, final Configuration conf,
final RegionServerServices rsServices, final CancelableProgressable reporter)
throws IOException {
+ return openHRegion(rootDir, info, htd, wal, conf, rsServices, reporter, null);
+ }
+
+ /**
+ * Open a Region.
+ * @param rootDir Root directory for HBase instance
+ * @param info Info for region to be opened.
+ * @param htd the table descriptor
+ * @param wal WAL for region to use. This method will call
+ * WAL#setSequenceNumber(long) passing the result of the call to
+ * HRegion#getMinSequenceId() to ensure the wal id is properly kept
+ * up. HRegionStore does this every time it opens a new region.
+ * @param conf The Configuration object to use.
+ * @param rsServices An interface we can request flushes against.
+ * @param reporter An interface we can report progress against.
+ * @param keyManagementService reference to {@link KeyManagementService} or null
+ * @return new HRegion
+ */
+ public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
+ final TableDescriptor htd, final WAL wal, final Configuration conf,
+ final RegionServerServices rsServices, final CancelableProgressable reporter,
+ final KeyManagementService keyManagementService) throws IOException {
FileSystem fs = null;
if (rsServices != null) {
fs = rsServices.getFileSystem();
@@ -7769,7 +7923,8 @@ public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
if (fs == null) {
fs = rootDir.getFileSystem(conf);
}
- return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter);
+ return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter,
+ keyManagementService);
}
/**
@@ -7784,57 +7939,70 @@ public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
* properly kept up. HRegionStore does this every time it opens a new region.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal)
throws IOException {
- return openHRegion(conf, fs, rootDir, info, htd, wal, null, null);
+ return openHRegion(conf, fs, rootDir, info, htd, wal, null, null, null);
+ }
+
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+ public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
+ final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal,
+ final RegionServerServices rsServices, final CancelableProgressable reporter)
+ throws IOException {
+ return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter, null);
}
/**
* Open a Region.
- * @param conf The Configuration object to use.
- * @param fs Filesystem to use
- * @param rootDir Root directory for HBase instance
- * @param info Info for region to be opened.
- * @param htd the table descriptor
- * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long)
- * passing the result of the call to HRegion#getMinSequenceId() to ensure the
- * wal id is properly kept up. HRegionStore does this every time it opens a new
- * region.
- * @param rsServices An interface we can request flushes against.
- * @param reporter An interface we can report progress against.
+ * @param conf The Configuration object to use.
+ * @param fs Filesystem to use
+ * @param rootDir Root directory for HBase instance
+ * @param info Info for region to be opened.
+ * @param htd the table descriptor
+ * @param wal WAL for region to use. This method will call
+ * WAL#setSequenceNumber(long) passing the result of the call to
+ * HRegion#getMinSequenceId() to ensure the wal id is properly kept
+ * up. HRegionStore does this every time it opens a new region.
+ * @param rsServices An interface we can request flushes against.
+ * @param reporter An interface we can report progress against.
+ * @param keyManagementService reference to {@link KeyManagementService} or null
* @return new HRegion
*/
public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal,
- final RegionServerServices rsServices, final CancelableProgressable reporter)
- throws IOException {
+ final RegionServerServices rsServices, final CancelableProgressable reporter,
+ final KeyManagementService keyManagementService) throws IOException {
Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable());
- return openHRegionFromTableDir(conf, fs, tableDir, info, htd, wal, rsServices, reporter);
+ return openHRegionFromTableDir(conf, fs, tableDir, info, htd, wal, rsServices, reporter,
+ keyManagementService);
}
/**
* Open a Region.
- * @param conf The Configuration object to use.
- * @param fs Filesystem to use
- * @param info Info for region to be opened.
- * @param htd the table descriptor
- * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long)
- * passing the result of the call to HRegion#getMinSequenceId() to ensure the
- * wal id is properly kept up. HRegionStore does this every time it opens a new
- * region.
- * @param rsServices An interface we can request flushes against.
- * @param reporter An interface we can report progress against.
+ * @param conf The Configuration object to use.
+ * @param fs Filesystem to use
+ * @param info Info for region to be opened.
+ * @param htd the table descriptor
+ * @param wal WAL for region to use. This method will call
+ * WAL#setSequenceNumber(long) passing the result of the call to
+ * HRegion#getMinSequenceId() to ensure the wal id is properly kept
+ * up. HRegionStore does this every time it opens a new region.
+ * @param rsServices An interface we can request flushes against.
+ * @param reporter An interface we can report progress against.
+ * @param keyManagementService reference to {@link KeyManagementService} or null
* @return new HRegion
* @throws NullPointerException if {@code info} is {@code null}
*/
public static HRegion openHRegionFromTableDir(final Configuration conf, final FileSystem fs,
final Path tableDir, final RegionInfo info, final TableDescriptor htd, final WAL wal,
- final RegionServerServices rsServices, final CancelableProgressable reporter)
- throws IOException {
+ final RegionServerServices rsServices, final CancelableProgressable reporter,
+ final KeyManagementService keyManagementService) throws IOException {
Objects.requireNonNull(info, "RegionInfo cannot be null");
LOG.debug("Opening region: {}", info);
- HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices);
+ HRegion r =
+ HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices, keyManagementService);
return r.openHRegion(reporter);
}
@@ -7848,19 +8016,15 @@ public NavigableMap<byte[], Integer> getReplicationScope() {
* @param reporter An interface we can report progress against.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter)
throws IOException {
HRegionFileSystem regionFs = other.getRegionFileSystem();
HRegion r = newHRegion(regionFs.getTableDir(), other.getWAL(), regionFs.getFileSystem(),
- other.baseConf, other.getRegionInfo(), other.getTableDescriptor(), null);
+ other.baseConf, other.getRegionInfo(), other.getTableDescriptor(), null, null);
return r.openHRegion(reporter);
}
- public static Region openHRegion(final Region other, final CancelableProgressable reporter)
- throws IOException {
- return openHRegion((HRegion) other, reporter);
- }
-
/**
* Open HRegion.
*
@@ -7926,7 +8090,7 @@ public static HRegion openReadOnlyFileSystemHRegion(final Configuration conf, fi
if (info.getReplicaId() <= 0) {
info = RegionReplicaUtil.getRegionInfoForReplica(info, 1);
}
- HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null);
+ HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null, null);
r.writestate.setReadOnly(true);
return r.openHRegion(null);
}
@@ -7946,7 +8110,7 @@ public static HRegion warmupHRegion(final RegionInfo info, final TableDescriptor
if (fs == null) {
fs = rootDir.getFileSystem(conf);
}
- HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null);
+ HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null, null);
r.initializeWarmup(reporter);
r.close();
return r;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index cd49ceb753ea..dec10a3c02f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -120,6 +120,7 @@
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.mob.RSMobFileCleanerChore;
@@ -600,7 +601,6 @@ protected RegionServerCoprocessorHost getCoprocessorHost() {
return getRegionServerCoprocessorHost();
}
- @Override
protected boolean canCreateBaseZNode() {
return !clusterMode();
}
@@ -1453,6 +1453,9 @@ protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
initializeFileSystem();
}
+ buildSystemKeyCache();
+ managedKeyDataCache = new ManagedKeyDataCache(this.getConfiguration(), keymetaAdmin);
+
// hack! Maps DFSClient => RegionServer for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
if (this.conf.get("mapreduce.task.attempt.id") == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 98299c47302c..fde89d122e28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -93,7 +93,7 @@
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -336,7 +336,8 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException {
return new StoreContext.Builder().withBlockSize(family.getBlocksize())
- .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family))
+ .withEncryptionContext(SecurityUtil.createEncryptionContext(conf, region.getTableDescriptor(),
+ family, region.getManagedKeyDataCache(), region.getSystemKeyCache()))
.withBloomType(family.getBloomFilterType()).withCacheConfig(createCacheConf(family))
.withCellComparator(region.getTableDescriptor().isMetaTable() || conf
.getBoolean(HRegion.USE_META_CELL_COMPARATOR, HRegion.DEFAULT_USE_META_CELL_COMPARATOR)
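The HStore change above swaps EncryptionUtil for SecurityUtil and hands it the table descriptor plus the region's key caches. SecurityUtil's actual resolution logic is outside this diff; the sketch below shows one plausible shape, in which an active centrally managed key is preferred and the per-family key remains the fallback. Everything in it is an assumption for illustration, not the patch's implementation.

```java
import java.security.Key;

/** Hypothetical selection order for an encryption key; NOT the actual SecurityUtil logic. */
class EncryptionKeySelectorSketch {
  interface ManagedKeyLookup { Key activeKeyFor(String namespace); }

  static Key selectKey(boolean keyManagementEnabled, String keyNamespace,
    ManagedKeyLookup managedKeys, Key familyConfiguredKey) {
    if (keyManagementEnabled && managedKeys != null) {
      Key managed = managedKeys.activeKeyFor(keyNamespace);
      if (managed != null) {
        return managed; // centrally managed key wins when one is active
      }
    }
    // fall back to the legacy per-family key; null means encryption stays off
    return familyConfiguredKey;
  }
}
```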
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
index a7df71f460e4..0fb5c2e5f940 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
@@ -43,7 +45,11 @@
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -213,9 +219,16 @@ public long getMaxMemStoreTS() {
*/
private final BloomType cfBloomType;
+ private String keyNamespace;
+
+ private SystemKeyCache systemKeyCache;
+
+ private final ManagedKeyDataCache managedKeyDataCache;
+
/**
* Constructor, loads a reader and it's indices, etc. May allocate a substantial amount of ram
- * depending on the underlying files (10-20MB?).
+ * depending on the underlying files (10-20MB?). Since this constructor is used only in the read
+ * path, the key namespace is not needed.
* @param fs The current file system to use.
* @param p The path of the file.
* @param conf The current configuration.
@@ -229,7 +242,9 @@ public long getMaxMemStoreTS() {
*/
public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheConf,
BloomType cfBloomType, boolean primaryReplica, StoreFileTracker sft) throws IOException {
- this(sft.getStoreFileInfo(p, primaryReplica), cfBloomType, cacheConf);
+ this(sft.getStoreFileInfo(p, primaryReplica), cfBloomType, cacheConf, null, null,
+ SecurityUtil.isKeyManagementEnabled(conf) ? SystemKeyCache.createCache(conf, fs) : null,
+ SecurityUtil.isKeyManagementEnabled(conf) ? new ManagedKeyDataCache(conf, null) : null);
}
/**
@@ -243,8 +258,15 @@ public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheCo
* ignored.
* @param cacheConf The cache configuration and block cache reference.
*/
- public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf) {
- this(fileInfo, cfBloomType, cacheConf, null);
+ public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf)
+ throws IOException {
+ this(fileInfo, cfBloomType, cacheConf, null, KeyNamespaceUtil.constructKeyNamespace(fileInfo),
+ SecurityUtil.isKeyManagementEnabled(fileInfo.getConf())
+ ? SystemKeyCache.createCache(fileInfo.getConf(), fileInfo.getFileSystem())
+ : null,
+ SecurityUtil.isKeyManagementEnabled(fileInfo.getConf())
+ ? new ManagedKeyDataCache(fileInfo.getConf(), null)
+ : null);
}
/**
@@ -260,10 +282,14 @@ public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cac
* @param metrics Tracks bloom filter requests and results. May be null.
*/
public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf,
- BloomFilterMetrics metrics) {
+ BloomFilterMetrics metrics, String keyNamespace, SystemKeyCache systemKeyCache,
+ ManagedKeyDataCache managedKeyDataCache) {
this.fileInfo = fileInfo;
this.cacheConf = cacheConf;
this.metrics = metrics;
+ this.keyNamespace = keyNamespace != null ? keyNamespace : KEY_SPACE_GLOBAL;
+ this.systemKeyCache = systemKeyCache;
+ this.managedKeyDataCache = managedKeyDataCache;
if (BloomFilterFactory.isGeneralBloomEnabled(fileInfo.getConf())) {
this.cfBloomType = cfBloomType;
} else {
@@ -392,7 +418,8 @@ public HDFSBlocksDistribution getHDFSBlockDistribution() {
private void open() throws IOException {
fileInfo.initHDFSBlocksDistribution();
long readahead = fileInfo.isNoReadahead() ? 0L : -1L;
- ReaderContext context = fileInfo.createReaderContext(false, readahead, ReaderType.PREAD);
+ ReaderContext context = fileInfo.createReaderContext(false, readahead, ReaderType.PREAD,
+ keyNamespace, systemKeyCache, managedKeyDataCache);
fileInfo.initHFileInfo(context);
StoreFileReader reader = fileInfo.preStoreFileReaderOpen(context, cacheConf);
if (reader == null) {
@@ -540,7 +567,8 @@ public void initReader() throws IOException {
private StoreFileReader createStreamReader(boolean canUseDropBehind) throws IOException {
initReader();
final boolean doDropBehind = canUseDropBehind && cacheConf.shouldDropBehindCompaction();
- ReaderContext context = fileInfo.createReaderContext(doDropBehind, -1, ReaderType.STREAM);
+ ReaderContext context = fileInfo.createReaderContext(doDropBehind, -1, ReaderType.STREAM,
+ keyNamespace, systemKeyCache, managedKeyDataCache);
StoreFileReader reader = fileInfo.preStoreFileReaderOpen(context, cacheConf);
if (reader == null) {
reader = fileInfo.createReader(context, cacheConf);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index fdfea375e096..61bd92821de7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -90,6 +90,7 @@
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -234,6 +235,9 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BooleanMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
@@ -4058,6 +4062,86 @@ public GetCachedFilesListResponse getCachedFilesList(RpcController controller,
return responseBuilder.addAllCachedFiles(fullyCachedFiles).build();
}
+ /**
+ * Refreshes the system key cache on the region server by rebuilding it with the latest keys. This
+ * is called by the master when a system key rotation has occurred.
+ * @param controller the RPC controller
+ * @param request the request
+ * @return empty response
+ */
+ @Override
+ @QosPriority(priority = HConstants.ADMIN_QOS)
+ public EmptyMsg refreshSystemKeyCache(final RpcController controller, final EmptyMsg request)
+ throws ServiceException {
+ try {
+ checkOpen();
+ requestCount.increment();
+ LOG.info("Received RefreshSystemKeyCache request, rebuilding system key cache");
+ server.rebuildSystemKeyCache();
+ return EmptyMsg.getDefaultInstance();
+ } catch (IOException ie) {
+ LOG.error("Failed to rebuild system key cache", ie);
+ throw new ServiceException(ie);
+ }
+ }
+
+ /**
+ * Ejects a specific managed key entry from the managed key data cache on the region server.
+ * @param controller the RPC controller
+ * @param request the request containing key custodian, namespace, and metadata hash
+ * @return BooleanMsg indicating whether the key was ejected
+ */
+ @Override
+ @QosPriority(priority = HConstants.ADMIN_QOS)
+ public BooleanMsg ejectManagedKeyDataCacheEntry(final RpcController controller,
+ final ManagedKeyEntryRequest request) throws ServiceException {
+ try {
+ checkOpen();
+ } catch (IOException e) {
+ LOG.error("Failed to eject managed key data cache entry", e);
+ throw new ServiceException(e);
+ }
+ requestCount.increment();
+ byte[] keyCustodian = request.getKeyCustNs().getKeyCust().toByteArray();
+ String keyNamespace = request.getKeyCustNs().getKeyNamespace();
+ byte[] keyMetadataHash = request.getKeyMetadataHash().toByteArray();
+
+ if (LOG.isInfoEnabled()) {
+ String keyCustodianEncoded = ManagedKeyProvider.encodeToStr(keyCustodian);
+ String keyMetadataHashEncoded = ManagedKeyProvider.encodeToStr(keyMetadataHash);
+ LOG.info(
+ "Received EjectManagedKeyDataCacheEntry request for key custodian: {}, namespace: {}, "
+ + "metadata hash: {}",
+ keyCustodianEncoded, keyNamespace, keyMetadataHashEncoded);
+ }
+
+ boolean ejected = server.getKeyManagementService().getManagedKeyDataCache()
+ .ejectKey(keyCustodian, keyNamespace, keyMetadataHash);
+ return BooleanMsg.newBuilder().setBoolMsg(ejected).build();
+ }
+
+ /**
+ * Clears all entries in the managed key data cache on the region server.
+ * @param controller the RPC controller
+ * @param request the request (empty)
+ * @return empty response
+ */
+ @Override
+ @QosPriority(priority = HConstants.ADMIN_QOS)
+ public EmptyMsg clearManagedKeyDataCache(final RpcController controller, final EmptyMsg request)
+ throws ServiceException {
+ try {
+ checkOpen();
+ } catch (IOException ie) {
+ LOG.error("Failed to clear managed key data cache", ie);
+ throw new ServiceException(ie);
+ }
+ requestCount.increment();
+ LOG.info("Received ClearManagedKeyDataCache request, clearing managed key data cache");
+ server.getKeyManagementService().getManagedKeyDataCache().clearCache();
+ return EmptyMsg.getDefaultInstance();
+ }
+
RegionScannerContext checkQuotaAndGetRegionScannerContext(ScanRequest request,
ScanResponse.Builder builder) throws IOException {
if (request.hasScannerId()) {
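
The three RPC handlers above are driven end to end through the Admin-level wrappers introduced in this patch. The following is a minimal operator-side sketch, not part of the patch itself; the custodian, namespace, and metadata values are illustrative:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class KeyCacheOpsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
          Admin admin = conn.getAdmin()) {
          // Target every live region server; a real tool might take a subset.
          List<ServerName> servers = new ArrayList<>(admin.getRegionServers());
          // After a system key rotation, rebuild each server's system key cache.
          admin.refreshSystemKeyCacheOnServers(servers);
          // Eject one managed key entry ("cust1"/"ns1"/"meta1" are made-up values).
          admin.ejectManagedKeyDataCacheEntryOnServers(servers, "cust1".getBytes(), "ns1", "meta1");
          // Or clear the whole managed key data cache without a process restart.
          admin.clearManagedKeyDataCacheOnServers(servers);
        }
      }
    }
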
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index a46e2dae695c..db5cec9f3228 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
@@ -54,7 +55,8 @@
* judicious adding API. Changes cause ripples through the code base.
*/
@InterfaceAudience.Private
-public interface RegionServerServices extends Server, MutableOnlineRegions, FavoredNodesForRegion {
+public interface RegionServerServices
+ extends Server, MutableOnlineRegions, FavoredNodesForRegion, KeyManagementService {
/** Returns the WAL for a particular region. Pass null for getting the default (common) WAL */
WAL getWAL(RegionInfo regionInfo) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
index 1c837d216f38..998332637373 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
@@ -25,6 +25,8 @@
import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorConfig;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.yetus.audience.InterfaceAudience;
@@ -117,4 +119,12 @@ public int getNumStores() {
long getMemStoreSize() {
return region.getMemStoreDataSize();
}
+
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return rsServices.getManagedKeyDataCache();
+ }
+
+ public SystemKeyCache getSystemKeyCache() {
+ return rsServices.getSystemKeyCache();
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
index 30cf5e2a92fa..08e710826358 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
@@ -41,6 +41,9 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.io.hfile.BloomFilterMetrics;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy;
@@ -116,6 +119,10 @@ public abstract class StoreEngine
   void removeStoreFiles(List<StoreFileInfo> storeFiles) throws IOException;
+
+ /**
+ * Get the store context.
+ * @return the store context.
+ */
+ StoreContext getStoreContext();
}
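
The new getStoreContext() accessor exists so encryption code can derive a managed-key namespace from whatever store a tracker serves. A small sketch under that assumption, using the KeyNamespaceUtil overload exercised by the tests later in this patch:

    import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
    import org.apache.hadoop.hbase.regionserver.StoreContext;
    import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;

    final class KeyNamespaceFromTracker {
      // Derive the "<table>/<family>" namespace used to look up managed keys
      // for files written by this tracker.
      static String keyNamespaceOf(StoreFileTracker tracker) {
        StoreContext ctx = tracker.getStoreContext();
        return KeyNamespaceUtil.constructKeyNamespace(ctx);
      }
    }
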
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
index 779a114af594..87eca7b93c9c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
@@ -375,6 +375,11 @@ public String createFromHFileLink(final String hfileLinkName, final boolean crea
createBackRef);
}
+ @Override
+ public StoreContext getStoreContext() {
+ return ctx;
+ }
+
public void removeStoreFiles(List<StoreFileInfo> storeFiles) throws IOException {
archiveStoreFiles(storeFiles);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index a4ca20fa7311..1ca3f68ee997 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.master.replication.OfflineTableReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationGroupOffset;
@@ -385,5 +386,10 @@ public Connection createConnection(Configuration conf) throws IOException {
public AsyncClusterConnection getAsyncClusterConnection() {
return null;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
index 5f9433a3f141..5fff2a417ebc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
@@ -17,15 +17,37 @@
*/
package org.apache.hadoop.hbase.security;
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.io.crypto.Cipher;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Security related generic utility methods.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
-public class SecurityUtil {
+public final class SecurityUtil {
+ private static final Logger LOG = LoggerFactory.getLogger(SecurityUtil.class);
+
+ private SecurityUtil() {
+ // Utility class
+ }
/**
* Get the user name from a principal
@@ -45,4 +67,289 @@ public static String getPrincipalWithoutRealm(final String principal) {
int i = principal.indexOf("@");
return (i > -1) ? principal.substring(0, i) : principal;
}
+
+ /**
+ * Helper to create an encryption context with the current encryption key, suitable for writes.
+ * @param conf The current configuration.
+ * @param tableDescriptor The table descriptor.
+ * @param family The current column descriptor.
+ * @param managedKeyDataCache The managed key data cache.
+ * @param systemKeyCache The system key cache.
+ * @return The created encryption context.
+ * @throws IOException if an encryption key for the column cannot be unwrapped
+ * @throws IllegalStateException in case of encryption related configuration errors
+ */
+ public static Encryption.Context createEncryptionContext(Configuration conf,
+ TableDescriptor tableDescriptor, ColumnFamilyDescriptor family,
+ ManagedKeyDataCache managedKeyDataCache, SystemKeyCache systemKeyCache) throws IOException {
+ Encryption.Context cryptoContext = Encryption.Context.NONE;
+ boolean isKeyManagementEnabled = isKeyManagementEnabled(conf);
+ String cipherName = family.getEncryptionType();
+ String keyNamespace = null; // Will be set by fallback logic
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating encryption context for table: {} and column family: {}",
+ tableDescriptor.getTableName().getNameAsString(), family.getNameAsString());
+ }
+ if (cipherName != null) {
+ if (!Encryption.isEncryptionEnabled(conf)) {
+ throw new IllegalStateException("Encryption for family '" + family.getNameAsString()
+ + "' configured with type '" + cipherName + "' but the encryption feature is disabled");
+ }
+ if (isKeyManagementEnabled && systemKeyCache == null) {
+ throw new IOException("Key management is enabled, but SystemKeyCache is null");
+ }
+ Cipher cipher = null;
+ Key key = null;
+ ManagedKeyData kekKeyData =
+ isKeyManagementEnabled ? systemKeyCache.getLatestSystemKey() : null;
+
+ // Scenario 1: If family has a key, unwrap it and use that as DEK.
+ byte[] familyKeyBytes = family.getEncryptionKey();
+ if (familyKeyBytes != null) {
+ try {
+ if (isKeyManagementEnabled) {
+ // Scenario 1a: If key management is enabled, use STK for both unwrapping and KEK.
+ key = EncryptionUtil.unwrapKey(conf, null, familyKeyBytes, kekKeyData.getTheKey());
+ } else {
+ // Scenario 1b: If key management is disabled, unwrap the key using master key.
+ key = EncryptionUtil.unwrapKey(conf, familyKeyBytes);
+ }
+ LOG.debug("Scenario 1: Use family key for namespace {} cipher: {} "
+ + "key management enabled: {}", keyNamespace, cipherName, isKeyManagementEnabled);
+ } catch (KeyException e) {
+ throw new IOException(e);
+ }
+ } else {
+ if (isKeyManagementEnabled) {
+ boolean localKeyGenEnabled =
+ conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_DEFAULT_ENABLED);
+ // Implement 4-step fallback logic for key namespace resolution in the order of
+ // 1. CF KEY_NAMESPACE attribute
+ // 2. Constructed namespace
+ // 3. Table name
+ // 4. Global namespace
+ String[] candidateNamespaces = { family.getEncryptionKeyNamespace(),
+ KeyNamespaceUtil.constructKeyNamespace(tableDescriptor, family),
+ tableDescriptor.getTableName().getNameAsString(), ManagedKeyData.KEY_SPACE_GLOBAL };
+
+ ManagedKeyData activeKeyData = null;
+ for (String candidate : candidateNamespaces) {
+ if (candidate != null) {
+ // Log information on the table and column family we are looking for the active key in
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Looking for active key for table: {} and column family: {} with "
+ + "(custodian: {}, namespace: {})",
+ tableDescriptor.getTableName().getNameAsString(), family.getNameAsString(),
+ ManagedKeyData.KEY_GLOBAL_CUSTODIAN, candidate);
+ }
+ activeKeyData = managedKeyDataCache
+ .getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, candidate);
+ if (activeKeyData != null) {
+ keyNamespace = candidate;
+ break;
+ }
+ }
+ }
+
+ // Scenario 2: There is an active key
+ if (activeKeyData != null) {
+ if (!localKeyGenEnabled) {
+ // Scenario 2a: Use active key as DEK and latest STK as KEK
+ key = activeKeyData.getTheKey();
+ } else {
+ // Scenario 2b: Use active key as KEK and generate local key as DEK
+ kekKeyData = activeKeyData;
+ // TODO: Use the active key as a seed to generate the local key instead of
+ // random generation
+ cipher = getCipherIfValid(conf, cipherName, activeKeyData.getTheKey(),
+ family.getNameAsString());
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Scenario 2: Use active key with (custodian: {}, namespace: {}) for cipher: {} "
+ + "localKeyGenEnabled: {} for table: {} and column family: {}",
+ activeKeyData.getKeyCustodianEncoded(), activeKeyData.getKeyNamespace(), cipherName,
+ localKeyGenEnabled, tableDescriptor.getTableName().getNameAsString(),
+ family.getNameAsString());
+ }
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Scenario 3a: No active key found for table: {} and column family: {}",
+ tableDescriptor.getTableName().getNameAsString(), family.getNameAsString());
+ }
+ // Scenario 3a: Do nothing, let a random key be generated as DEK and if key management
+ // is enabled, let STK be used as KEK.
+ }
+ } else {
+ // Scenario 3b: Do nothing, let a random key be generated as DEK, let STK be used as KEK.
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Scenario 3b: Key management is disabled and no ENCRYPTION_KEY attribute "
+ + "set for table: {} and column family: {}",
+ tableDescriptor.getTableName().getNameAsString(), family.getNameAsString());
+ }
+ }
+ }
+ if (LOG.isDebugEnabled() && kekKeyData != null) {
+ LOG.debug(
+ "Usigng KEK with (custodian: {}, namespace: {}), checksum: {} and metadata " + "hash: {}",
+ kekKeyData.getKeyCustodianEncoded(), kekKeyData.getKeyNamespace(),
+ kekKeyData.getKeyChecksum(), kekKeyData.getKeyMetadataHashEncoded());
+ }
+
+ if (cipher == null) {
+ cipher =
+ getCipherIfValid(conf, cipherName, key, key == null ? null : family.getNameAsString());
+ }
+ if (key == null) {
+ key = cipher.getRandomKey();
+ }
+ cryptoContext = Encryption.newContext(conf);
+ cryptoContext.setCipher(cipher);
+ cryptoContext.setKey(key);
+ cryptoContext.setKeyNamespace(keyNamespace);
+ cryptoContext.setKEKData(kekKeyData);
+ }
+ return cryptoContext;
+ }
+
+ /**
+ * Create an encryption context from the encryption key found in a file trailer, suitable for reads.
+ * @param conf The current configuration.
+ * @param path The path of the file.
+ * @param trailer The file trailer.
+ * @param managedKeyDataCache The managed key data cache.
+ * @param systemKeyCache The system key cache.
+ * @return The created encryption context or null if no key material is available.
+ * @throws IOException if an encryption key for the file cannot be unwrapped
+ */
+ public static Encryption.Context createEncryptionContext(Configuration conf, Path path,
+ FixedFileTrailer trailer, ManagedKeyDataCache managedKeyDataCache,
+ SystemKeyCache systemKeyCache) throws IOException {
+ ManagedKeyData kekKeyData = null;
+ byte[] keyBytes = trailer.getEncryptionKey();
+ Encryption.Context cryptoContext = Encryption.Context.NONE;
+ LOG.debug("Creating encryption context for path: {}", path);
+ // Check for any key material available
+ if (keyBytes != null) {
+ cryptoContext = Encryption.newContext(conf);
+ Key kek = null;
+
+ // When there is key material, determine the appropriate KEK
+ boolean isKeyManagementEnabled = isKeyManagementEnabled(conf);
+ if (((trailer.getKEKChecksum() != 0L) || isKeyManagementEnabled) && systemKeyCache == null) {
+ throw new IOException("SystemKeyCache can't be null when using key management feature");
+ }
+ if ((trailer.getKEKChecksum() != 0L && !isKeyManagementEnabled)) {
+ throw new IOException(
+ "Seeing newer trailer with KEK checksum, but key management is disabled");
+ }
+
+ // Try STK lookup first if checksum is available.
+ if (trailer.getKEKChecksum() != 0L) {
+ LOG.debug("Looking for System Key with checksum: {}", trailer.getKEKChecksum());
+ ManagedKeyData systemKeyData =
+ systemKeyCache.getSystemKeyByChecksum(trailer.getKEKChecksum());
+ if (systemKeyData != null) {
+ kek = systemKeyData.getTheKey();
+ kekKeyData = systemKeyData;
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Found System Key with (custodian: {}, namespace: {}), checksum: {} and "
+ + "metadata hash: {}",
+ systemKeyData.getKeyCustodianEncoded(), systemKeyData.getKeyNamespace(),
+ systemKeyData.getKeyChecksum(), systemKeyData.getKeyMetadataHashEncoded());
+ }
+ }
+ }
+
+ // If STK lookup failed or no checksum available, try managed key lookup using metadata
+ if (kek == null && trailer.getKEKMetadata() != null) {
+ if (managedKeyDataCache == null) {
+ throw new IOException("KEK metadata is available, but ManagedKeyDataCache is null");
+ }
+ Throwable cause = null;
+ try {
+ kekKeyData = managedKeyDataCache.getEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES,
+ trailer.getKeyNamespace(), trailer.getKEKMetadata(), keyBytes);
+ } catch (KeyException | IOException e) {
+ cause = e;
+ }
+ // When getEntry returns null we treat it the same as exception case.
+ if (kekKeyData == null) {
+ throw new IOException(
+ "Failed to get key data for KEK metadata: " + trailer.getKEKMetadata(), cause);
+ }
+ kek = kekKeyData.getTheKey();
+ } else if (kek == null && isKeyManagementEnabled) {
+ // No checksum or metadata available, fall back to latest system key for backwards
+ // compatibility
+ ManagedKeyData systemKeyData = systemKeyCache.getLatestSystemKey();
+ if (systemKeyData == null) {
+ throw new IOException("Failed to get latest system key");
+ }
+ kek = systemKeyData.getTheKey();
+ kekKeyData = systemKeyData;
+ }
+
+ Key key;
+ if (kek != null) {
+ try {
+ key = EncryptionUtil.unwrapKey(conf, null, keyBytes, kek);
+ } catch (KeyException | IOException e) {
+ throw new IOException("Failed to unwrap key with KEK checksum: "
+ + trailer.getKEKChecksum() + ", metadata: " + trailer.getKEKMetadata(), e);
+ }
+ } else {
+ key = EncryptionUtil.unwrapKey(conf, keyBytes);
+ }
+ // Use the algorithm the key wants
+ Cipher cipher = getCipherIfValid(conf, key.getAlgorithm(), key, null);
+ cryptoContext.setCipher(cipher);
+ cryptoContext.setKey(key);
+ cryptoContext.setKeyNamespace(trailer.getKeyNamespace());
+ cryptoContext.setKEKData(kekKeyData);
+ }
+ return cryptoContext;
+ }
+
+ /**
+ * Get the cipher if the cipher name is valid, otherwise throw an exception.
+ * @param conf the configuration
+ * @param cipherName the cipher name to check
+ * @param key the key to check
+ * @param familyName the family name
+ * @return the cipher if the cipher name is valid
+ * @throws IllegalStateException if the cipher name is not valid
+ */
+ private static Cipher getCipherIfValid(Configuration conf, String cipherName, Key key,
+ String familyName) {
+ // Fail if misconfigured. We use the encryption type specified in the column schema
+ // as a sanity check on what the wrapped key is telling us.
+ if (key != null && !key.getAlgorithm().equalsIgnoreCase(cipherName)) {
+ throw new IllegalStateException(
+ "Encryption for family '" + familyName + "' configured with type '" + cipherName
+ + "' but key specifies algorithm '" + key.getAlgorithm() + "'");
+ }
+ // Use the algorithm the key wants
+ Cipher cipher = Encryption.getCipher(conf, cipherName);
+ if (cipher == null) {
+ throw new IllegalStateException("Cipher '" + cipherName + "' is not available");
+ }
+ return cipher;
+ }
+
+ /**
+ * From the given configuration, determine if key management is enabled.
+ * @param conf the configuration to check
+ * @return true if key management is enabled
+ */
+ public static boolean isKeyManagementEnabled(Configuration conf) {
+ return conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED);
+ }
}
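
The write-path entry point above is what flush and compaction writers would call; a hedged sketch of such a call site, with the caches assumed to come from the region server's KeyManagementService:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.io.crypto.Encryption;
    import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
    import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
    import org.apache.hadoop.hbase.security.SecurityUtil;

    final class WriterContextSketch {
      // Returns Encryption.Context.NONE when the family has no encryption configured;
      // otherwise resolves DEK and KEK per the scenario 1-3 fallback documented above.
      static Encryption.Context forWrite(Configuration conf, TableDescriptor table,
          ColumnFamilyDescriptor family, ManagedKeyDataCache managedKeys,
          SystemKeyCache systemKeys) throws IOException {
        return SecurityUtil.createEncryptionContext(conf, table, family, managedKeys, systemKeys);
      }
    }
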
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
index 192343ae41d3..eb4d72c7745f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
@@ -28,7 +28,9 @@
import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider;
import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -48,12 +50,23 @@ private EncryptionTest() {
* Check that the configured key provider can be loaded and initialized, or throw an exception.
*/
public static void testKeyProvider(final Configuration conf) throws IOException {
- String providerClassName =
- conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName());
+ boolean isKeyManagementEnabled = SecurityUtil.isKeyManagementEnabled(conf);
+ String providerClassName;
+ if (isKeyManagementEnabled) {
+ providerClassName = conf.get(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ ManagedKeyStoreKeyProvider.class.getName());
+ } else {
+ providerClassName =
+ conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName());
+ }
Boolean result = keyProviderResults.get(providerClassName);
if (result == null) {
try {
- Encryption.getKeyProvider(conf);
+ if (isKeyManagementEnabled) {
+ Encryption.getManagedKeyProvider(conf);
+ } else {
+ Encryption.getKeyProvider(conf);
+ }
keyProviderResults.put(providerClassName, true);
} catch (Exception e) { // most likely a RuntimeException
keyProviderResults.put(providerClassName, false);
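
For reference, a configuration sketch of the two provider paths testKeyProvider() now distinguishes; the provider class name is illustrative, and validation fails with an IOException if the configured class cannot be loaded and initialized as a ManagedKeyProvider:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.EncryptionTest;

    final class ProviderCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // With key management enabled, the managed provider property is consulted...
        conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
        conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
          "org.example.MyManagedKeyProvider"); // illustrative class name
        // ...otherwise CRYPTO_KEYPROVIDER_CONF_KEY selects the classic provider.
        EncryptionTest.testKeyProvider(conf);
      }
    }
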
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 564c46ad5bf6..d91cd9b78615 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -84,26 +85,33 @@ public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor,
/**
* Create new set of regions on the specified file-system. NOTE: that you should add the regions
* to hbase:meta after this operation.
- * @param conf {@link Configuration}
+ * @param env {@link MasterProcedureEnv}
* @param rootDir Root directory for HBase instance
* @param tableDescriptor description of the table
* @param newRegions {@link RegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
*/
- public static List<RegionInfo> createRegions(final Configuration conf, final Path rootDir,
+ public static List<RegionInfo> createRegions(final MasterProcedureEnv env, final Path rootDir,
final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task)
throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
- ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf,
+ ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(env.getMasterConfiguration(),
"RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber);
try {
- return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task);
+ return createRegions(exec, env.getMasterConfiguration(), env, rootDir, tableDescriptor,
+ newRegions, task);
} finally {
exec.shutdownNow();
}
}
+
+ public static List<RegionInfo> createRegions(final ThreadPoolExecutor exec,
+ final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor,
+ final RegionInfo[] newRegions, final RegionFillTask task) throws IOException {
+ return createRegions(exec, conf, null, rootDir, tableDescriptor, newRegions, task);
+ }
+
/**
* Create new set of regions on the specified file-system. NOTE: that you should add the regions
* to hbase:meta after this operation.
@@ -115,8 +123,9 @@ public static List<RegionInfo> createRegions(final Configuration conf, final Pat
* @param task {@link RegionFillTask} custom code to populate region after creation
*/
public static List<RegionInfo> createRegions(final ThreadPoolExecutor exec,
- final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor,
- final RegionInfo[] newRegions, final RegionFillTask task) throws IOException {
+ final Configuration conf, final MasterProcedureEnv env, final Path rootDir,
+ final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task)
+ throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
CompletionService<RegionInfo> completionService = new ExecutorCompletionService<>(exec);
@@ -125,7 +134,7 @@ public static List<RegionInfo> createRegions(final ThreadPoolExecutor exec,
completionService.submit(new Callable<RegionInfo>() {
@Override
public RegionInfo call() throws IOException {
- return createRegion(conf, rootDir, tableDescriptor, newRegion, task);
+ return createRegion(conf, env, rootDir, tableDescriptor, newRegion, task);
}
});
}
@@ -151,15 +160,16 @@ public RegionInfo call() throws IOException {
* @param newRegion {@link RegionInfo} that describes the region to create
* @param task {@link RegionFillTask} custom code to populate region after creation
*/
- public static RegionInfo createRegion(final Configuration conf, final Path rootDir,
- final TableDescriptor tableDescriptor, final RegionInfo newRegion, final RegionFillTask task)
- throws IOException {
+ public static RegionInfo createRegion(final Configuration conf, final MasterProcedureEnv env,
+ final Path rootDir, final TableDescriptor tableDescriptor, final RegionInfo newRegion,
+ final RegionFillTask task) throws IOException {
// 1. Create HRegion
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
Configuration confForWAL = new Configuration(conf);
confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
- HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false);
+ HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false,
+ null, env == null ? null : env.getMasterServices());
try {
// 2. Custom user code to interact with the created region
if (task != null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
new file mode 100644
index 000000000000..16fadfd81a15
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.security.Key;
+import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+
+public class DummyKeyProvider implements KeyProvider {
+ @Override
+ public void init(String params) {
+ }
+
+ @Override
+ public Key[] getKeys(String[] aliases) {
+ return null;
+ }
+
+ @Override
+ public Key getKey(String alias) {
+ return null;
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
new file mode 100644
index 000000000000..c91539b7ed68
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.Key;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.mockito.Mockito;
+
+public class ManagedKeyProviderInterceptor extends MockManagedKeyProvider {
+ public final MockManagedKeyProvider delegate;
+ public final MockManagedKeyProvider spy;
+
+ public ManagedKeyProviderInterceptor() {
+ this.delegate = new MockManagedKeyProvider();
+ this.spy = Mockito.spy(delegate);
+ }
+
+ @Override
+ public void initConfig(Configuration conf, String providerParameters) {
+ spy.initConfig(conf, providerParameters);
+ }
+
+ @Override
+ public ManagedKeyData getManagedKey(byte[] custodian, String namespace) throws IOException {
+ return spy.getManagedKey(custodian, namespace);
+ }
+
+ @Override
+ public ManagedKeyData getSystemKey(byte[] systemId) throws IOException {
+ return spy.getSystemKey(systemId);
+ }
+
+ @Override
+ public ManagedKeyData unwrapKey(String keyMetadata, byte[] wrappedKey) throws IOException {
+ return spy.unwrapKey(keyMetadata, wrappedKey);
+ }
+
+ @Override
+ public void init(String params) {
+ spy.init(params);
+ }
+
+ @Override
+ public Key getKey(String alias) {
+ return spy.getKey(alias);
+ }
+
+ @Override
+ public Key[] getKeys(String[] aliases) {
+ return spy.getKeys(aliases);
+ }
+
+ @Override
+ public void setMockedKeyState(String alias, ManagedKeyState state) {
+ delegate.setMockedKeyState(alias, state);
+ }
+
+ @Override
+ public void setMultikeyGenMode(boolean multikeyGenMode) {
+ delegate.setMultikeyGenMode(multikeyGenMode);
+ }
+
+ @Override
+ public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace) {
+ return delegate.getLastGeneratedKeyData(alias, keyNamespace);
+ }
+
+ @Override
+ public void setMockedKey(String alias, java.security.Key key, String keyNamespace) {
+ delegate.setMockedKey(alias, key, keyNamespace);
+ }
+}
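
Because every ManagedKeyProvider call above is routed through the Mockito spy, tests can apply standard verification to a provider the cluster treats as a real object. A brief sketch, assuming the test already obtained the interceptor instance from its cluster configuration:

    package org.apache.hadoop.hbase.keymeta;

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.verify;

    import java.io.IOException;

    final class InterceptorVerifySketch {
      // Assert that the system key path was exercised during the scenario under test.
      static void assertSystemKeyFetched(ManagedKeyProviderInterceptor provider)
          throws IOException {
        verify(provider.spy).getSystemKey(any());
      }
    }
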
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
new file mode 100644
index 000000000000..9f2381e849bb
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.junit.After;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ManagedKeyTestBase {
+ private static final Logger LOG = LoggerFactory.getLogger(ManagedKeyTestBase.class);
+
+ protected HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+ @Before
+ public void setUp() throws Exception {
+ // Uncomment to enable trace logging for the tests that extend this base class.
+ // Log4jUtils.setLogLevel("org.apache.hadoop.hbase", "TRACE");
+ if (isWithKeyManagement()) {
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ getKeyProviderClass().getName());
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes",
+ KeymetaServiceEndpoint.class.getName());
+ }
+
+ // Start the minicluster if needed
+ if (isWithMiniClusterStart()) {
+ LOG.info("\n\nManagedKeyTestBase.setUp: Starting minicluster\n");
+ startMiniCluster();
+ LOG.info("\n\nManagedKeyTestBase.setUp: Minicluster successfully started\n");
+ }
+ }
+
+ protected void startMiniCluster() throws Exception {
+ startMiniCluster(getSystemTableNameToWaitFor());
+ }
+
+ protected void startMiniCluster(TableName tableNameToWaitFor) throws Exception {
+ TEST_UTIL.startMiniCluster(1);
+ waitForMasterInitialization(tableNameToWaitFor);
+ }
+
+ protected void restartMiniCluster() throws Exception {
+ restartMiniCluster(getSystemTableNameToWaitFor());
+ }
+
+ protected void restartMiniCluster(TableName tableNameToWaitFor) throws Exception {
+ LOG.info("\n\nManagedKeyTestBase.restartMiniCluster: Flushing caches\n");
+ TEST_UTIL.flush();
+
+ LOG.info("\n\nManagedKeyTestBase.restartMiniCluster: Shutting down cluster\n");
+ TEST_UTIL.shutdownMiniHBaseCluster();
+
+ LOG.info("\n\nManagedKeyTestBase.restartMiniCluster: Sleeping a bit\n");
+ Thread.sleep(2000);
+
+ LOG.info("\n\nManagedKeyTestBase.restartMiniCluster: Starting the cluster back up\n");
+ TEST_UTIL.restartHBaseCluster(1);
+
+ waitForMasterInitialization(tableNameToWaitFor);
+ }
+
+ private void waitForMasterInitialization(TableName tableNameToWaitFor) throws Exception {
+ LOG.info(
+ "\n\nManagedKeyTestBase.waitForMasterInitialization: Waiting for master initialization\n");
+ TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
+
+ LOG.info(
+ "\n\nManagedKeyTestBase.waitForMasterInitialization: Waiting for regions to be assigned\n");
+ TEST_UTIL.waitUntilAllRegionsAssigned(tableNameToWaitFor);
+ LOG.info("\n\nManagedKeyTestBase.waitForMasterInitialization: Regions assigned\n");
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ LOG.info("\n\nManagedKeyTestBase.tearDown: Shutting down cluster\n");
+ TEST_UTIL.shutdownMiniCluster();
+ LOG.info("\n\nManagedKeyTestBase.tearDown: Cluster successfully shut down\n");
+ // Clear the provider cache to avoid test interference
+ Encryption.clearKeyProviderCache();
+ }
+
+ protected Class<? extends ManagedKeyProvider> getKeyProviderClass() {
+ return MockManagedKeyProvider.class;
+ }
+
+ protected boolean isWithKeyManagement() {
+ return true;
+ }
+
+ protected boolean isWithMiniClusterStart() {
+ return true;
+ }
+
+ protected TableName getSystemTableNameToWaitFor() {
+ return KeymetaTableAccessor.KEY_META_TABLE_NAME;
+ }
+
+ /**
+ * Useful hook for setting a breakpoint while debugging Ruby tests: it simply logs a message,
+ * and you can even attach a conditional breakpoint to it.
+ */
+ protected void logMessage(String msg) {
+ LOG.info(msg);
+ }
+}
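
A sketch of a concrete test built on this base class; the class name and test body are illustrative, with the hooks overridden to route provider calls through the interceptor above:

    package org.apache.hadoop.hbase.keymeta;

    import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
    import org.junit.Test;

    public class ExampleManagedKeyTest extends ManagedKeyTestBase {
      @Override
      protected Class<? extends ManagedKeyProvider> getKeyProviderClass() {
        // Use the spy-backed provider so interactions can be verified.
        return ManagedKeyProviderInterceptor.class;
      }

      @Test
      public void testClusterStartsWithKeyManagement() throws Exception {
        // setUp() already started the minicluster and waited for the keymeta table.
        logMessage("cluster is up with key management enabled");
      }
    }
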
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
new file mode 100644
index 000000000000..3f6ddad6a1ee
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeyManagementBase {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeyManagementBase.class);
+
+ @Test
+ public void testGetKeyProviderWithInvalidProvider() throws Exception {
+ // Setup configuration with a non-ManagedKeyProvider
+ Configuration conf = new Configuration();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ "org.apache.hadoop.hbase.keymeta.DummyKeyProvider");
+
+ MasterServices mockServer = mock(MasterServices.class);
+ when(mockServer.getConfiguration()).thenReturn(conf);
+
+ final KeyManagementBase keyMgmt = new TestKeyManagement(mockServer);
+ assertEquals(mockServer, keyMgmt.getKeyManagementService());
+
+ // Should throw RuntimeException when provider cannot be cast to ManagedKeyProvider
+ RuntimeException exception = assertThrows(RuntimeException.class, () -> {
+ keyMgmt.getKeyProvider();
+ });
+ // The error message will be about ClassCastException since DummyKeyProvider doesn't implement
+ // ManagedKeyProvider
+ assertTrue(exception.getMessage().contains("ClassCastException")
+ || exception.getCause() instanceof ClassCastException);
+
+ exception = assertThrows(RuntimeException.class, () -> {
+ KeyManagementBase keyMgmt2 = new TestKeyManagement(conf);
+ keyMgmt2.getKeyProvider();
+ });
+ assertTrue(exception.getMessage().contains("ClassCastException")
+ || exception.getCause() instanceof ClassCastException);
+
+ assertThrows(IllegalArgumentException.class, () -> {
+ Configuration configuration = null;
+ new TestKeyManagement(configuration);
+ });
+ }
+
+ private static class TestKeyManagement extends KeyManagementBase {
+ public TestKeyManagement(MasterServices server) {
+ super(server);
+ }
+
+ public TestKeyManagement(Configuration configuration) {
+ super(configuration);
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java
new file mode 100644
index 000000000000..bfd8be319895
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+import static org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils.SeekableByteArrayInputStream;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestKeyManagementService {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeyManagementService.class);
+
+ @Rule
+ public TestName name = new TestName();
+
+ protected Configuration conf = new Configuration();
+ protected FileSystem mockFileSystem = mock(FileSystem.class);
+
+ @Before
+ public void setUp() throws Exception {
+ // Clear provider cache to avoid interference from other tests
+ Encryption.clearKeyProviderCache();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
+ conf.set(HConstants.HBASE_ORIGINAL_DIR, "/tmp/hbase");
+ }
+
+ @Test
+ public void testDefaultKeyManagementServiceCreation() throws IOException {
+ // SystemKeyCache needs at least one valid key to be created, so set up a mock FS that
+ // returns a mock file containing known mocked key metadata.
+ MockManagedKeyProvider provider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(conf);
+ ManagedKeyData keyData =
+ provider.getManagedKey("system".getBytes(), ManagedKeyData.KEY_SPACE_GLOBAL);
+ String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
+ Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
+ FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName);
+
+ // Create a real FSDataInputStream that contains the key metadata in UTF format
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ DataOutputStream dos = new DataOutputStream(baos);
+ dos.writeUTF(keyData.getKeyMetadata());
+ dos.close();
+
+ SeekableByteArrayInputStream seekableStream =
+ new SeekableByteArrayInputStream(baos.toByteArray());
+ FSDataInputStream realStream = new FSDataInputStream(seekableStream);
+
+ when(mockFileSystem.open(eq(mockFileStatus.getPath()))).thenReturn(realStream);
+ when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))))
+ .thenReturn(new FileStatus[] { mockFileStatus });
+
+ KeyManagementService service = KeyManagementService.createDefault(conf, mockFileSystem);
+ assertNotNull(service);
+ assertNotNull(service.getSystemKeyCache());
+ assertNotNull(service.getManagedKeyDataCache());
+ assertThrows(UnsupportedOperationException.class, () -> service.getKeymetaAdmin());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementUtils.java
new file mode 100644
index 000000000000..36df6a32ccd8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementUtils.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.security.Key;
+import java.security.KeyException;
+import javax.crypto.KeyGenerator;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Tests KeyManagementUtils for the difficult-to-cover error paths.
+ */
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeyManagementUtils {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeyManagementUtils.class);
+
+ private ManagedKeyProvider mockProvider;
+ private KeymetaTableAccessor mockAccessor;
+ private byte[] keyCust;
+ private String keyNamespace;
+ private String keyMetadata;
+ private byte[] wrappedKey;
+ private Key testKey;
+
+ @Before
+ public void setUp() throws Exception {
+ mockProvider = mock(ManagedKeyProvider.class);
+ mockAccessor = mock(KeymetaTableAccessor.class);
+ keyCust = "testCustodian".getBytes();
+ keyNamespace = "testNamespace";
+ keyMetadata = "testMetadata";
+ wrappedKey = new byte[] { 1, 2, 3, 4 };
+
+ KeyGenerator keyGen = KeyGenerator.getInstance("AES");
+ keyGen.init(256);
+ testKey = keyGen.generateKey();
+ }
+
+ @Test
+ public void testRetrieveKeyWithNullResponse() throws Exception {
+ String encKeyCust = ManagedKeyProvider.encodeToStr(keyCust);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(null);
+
+ KeyException exception = assertThrows(KeyException.class, () -> {
+ KeyManagementUtils.retrieveKey(mockProvider, mockAccessor, encKeyCust, keyCust, keyNamespace,
+ keyMetadata, wrappedKey);
+ });
+
+ assertNotNull(exception.getMessage());
+ assertTrue(exception.getMessage().contains("Invalid key that is null"));
+ }
+
+ @Test
+ public void testRetrieveKeyWithNullMetadata() throws Exception {
+ String encKeyCust = ManagedKeyProvider.encodeToStr(keyCust);
+ // Create a mock that returns null for getKeyMetadata()
+ ManagedKeyData mockKeyData = mock(ManagedKeyData.class);
+ when(mockKeyData.getKeyMetadata()).thenReturn(null);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(mockKeyData);
+
+ KeyException exception = assertThrows(KeyException.class, () -> {
+ KeyManagementUtils.retrieveKey(mockProvider, mockAccessor, encKeyCust, keyCust, keyNamespace,
+ keyMetadata, wrappedKey);
+ });
+
+ assertNotNull(exception.getMessage());
+ assertTrue(exception.getMessage().contains("Invalid key that is null"));
+ }
+
+ @Test
+ public void testRetrieveKeyWithMismatchedMetadata() throws Exception {
+ String encKeyCust = ManagedKeyProvider.encodeToStr(keyCust);
+ String differentMetadata = "differentMetadata";
+ ManagedKeyData keyDataWithDifferentMetadata =
+ new ManagedKeyData(keyCust, keyNamespace, testKey, ManagedKeyState.ACTIVE, differentMetadata);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(keyDataWithDifferentMetadata);
+
+ KeyException exception = assertThrows(KeyException.class, () -> {
+ KeyManagementUtils.retrieveKey(mockProvider, mockAccessor, encKeyCust, keyCust, keyNamespace,
+ keyMetadata, wrappedKey);
+ });
+
+ assertNotNull(exception.getMessage());
+ assertTrue(exception.getMessage().contains("invalid metadata"));
+ }
+
+ @Test
+ public void testRetrieveKeyWithDisabledState() throws Exception {
+ String encKeyCust = ManagedKeyProvider.encodeToStr(keyCust);
+ ManagedKeyData keyDataWithDisabledState =
+ new ManagedKeyData(keyCust, keyNamespace, testKey, ManagedKeyState.DISABLED, keyMetadata);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(keyDataWithDisabledState);
+
+ KeyException exception = assertThrows(KeyException.class, () -> {
+ KeyManagementUtils.retrieveKey(mockProvider, mockAccessor, encKeyCust, keyCust, keyNamespace,
+ keyMetadata, wrappedKey);
+ });
+
+ assertNotNull(exception.getMessage());
+ assertTrue(
+ exception.getMessage().contains("Invalid key that is null or having invalid metadata"));
+ }
+
+ @Test
+ public void testRetrieveKeySuccess() throws Exception {
+ String encKeyCust = ManagedKeyProvider.encodeToStr(keyCust);
+ ManagedKeyData validKeyData =
+ new ManagedKeyData(keyCust, keyNamespace, testKey, ManagedKeyState.ACTIVE, keyMetadata);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(validKeyData);
+
+ ManagedKeyData result = KeyManagementUtils.retrieveKey(mockProvider, mockAccessor, encKeyCust,
+ keyCust, keyNamespace, keyMetadata, wrappedKey);
+
+ assertNotNull(result);
+ assertEquals(keyMetadata, result.getKeyMetadata());
+ assertEquals(ManagedKeyState.ACTIVE, result.getKeyState());
+ }
+
+ @Test
+ public void testRetrieveKeyWithFailedState() throws Exception {
+ // FAILED state is allowed (unlike DISABLED), so this should succeed
+ String encKeyCust = ManagedKeyProvider.encodeToStr(keyCust);
+ ManagedKeyData keyDataWithFailedState =
+ new ManagedKeyData(keyCust, keyNamespace, null, ManagedKeyState.FAILED, keyMetadata);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(keyDataWithFailedState);
+
+ ManagedKeyData result = KeyManagementUtils.retrieveKey(mockProvider, mockAccessor, encKeyCust,
+ keyCust, keyNamespace, keyMetadata, wrappedKey);
+
+ assertNotNull(result);
+ assertEquals(ManagedKeyState.FAILED, result.getKeyState());
+ }
+
+ @Test
+ public void testRetrieveKeyWithInactiveState() throws Exception {
+ // INACTIVE state is allowed, so this should succeed
+ String encKeyCust = ManagedKeyProvider.encodeToStr(keyCust);
+ ManagedKeyData keyDataWithInactiveState =
+ new ManagedKeyData(keyCust, keyNamespace, testKey, ManagedKeyState.INACTIVE, keyMetadata);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(keyDataWithInactiveState);
+
+ ManagedKeyData result = KeyManagementUtils.retrieveKey(mockProvider, mockAccessor, encKeyCust,
+ keyCust, keyNamespace, keyMetadata, wrappedKey);
+
+ assertNotNull(result);
+ assertEquals(ManagedKeyState.INACTIVE, result.getKeyState());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java
new file mode 100644
index 000000000000..1012d2b5a08f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.StoreContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestKeyNamespaceUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeyNamespaceUtil.class);
+
+ @Test
+ public void testConstructKeyNamespace_FromTableDescriptorAndFamilyDescriptor() {
+ TableDescriptor tableDescriptor = mock(TableDescriptor.class);
+ ColumnFamilyDescriptor familyDescriptor = mock(ColumnFamilyDescriptor.class);
+ when(tableDescriptor.getTableName()).thenReturn(TableName.valueOf("test"));
+ when(familyDescriptor.getNameAsString()).thenReturn("family");
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(tableDescriptor, familyDescriptor);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromStoreContext() {
+ // Test store context path construction
+ TableName tableName = TableName.valueOf("test");
+ RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
+ HRegionFileSystem regionFileSystem = mock(HRegionFileSystem.class);
+ when(regionFileSystem.getRegionInfo()).thenReturn(regionInfo);
+
+ ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of("family");
+
+ StoreContext storeContext = StoreContext.getBuilder().withRegionFileSystem(regionFileSystem)
+ .withColumnFamilyDescriptor(familyDescriptor).build();
+
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeContext);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromStoreFileInfo_RegularFile() {
+ // Test a regular (non-linked) store file
+ StoreFileInfo storeFileInfo = mock(StoreFileInfo.class);
+ when(storeFileInfo.isLink()).thenReturn(false);
+ Path path = KeymetaTestUtils.createMockPath("test", "family");
+ when(storeFileInfo.getPath()).thenReturn(path);
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeFileInfo);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromStoreFileInfo_LinkedFile() {
+    // Test a linked store file resolved through its HFileLink origin path
+ StoreFileInfo storeFileInfo = mock(StoreFileInfo.class);
+ HFileLink link = mock(HFileLink.class);
+ when(storeFileInfo.isLink()).thenReturn(true);
+ Path path = KeymetaTestUtils.createMockPath("test", "family");
+ when(link.getOriginPath()).thenReturn(path);
+ when(storeFileInfo.getLink()).thenReturn(link);
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeFileInfo);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromPath() {
+    // Parse the table and family names out of a store file path
+ Path path = KeymetaTestUtils.createMockPath("test", "family");
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(path);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromStrings() {
+ // Test string-based construction
+ String tableName = "test";
+ String family = "family";
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(tableName, family);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_NullChecks() {
+ // Test null inputs for both table name and family
+ assertThrows(NullPointerException.class,
+ () -> KeyNamespaceUtil.constructKeyNamespace(null, "family"));
+ assertThrows(NullPointerException.class,
+ () -> KeyNamespaceUtil.constructKeyNamespace("test", null));
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
new file mode 100644
index 000000000000..0e9c0eae2393
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
@@ -0,0 +1,561 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.ArgumentMatchers.contains;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.withSettings;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import javax.crypto.spec.SecretKeySpec;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.keymeta.KeymetaServiceEndpoint.KeymetaAdminServiceImpl;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.GetManagedKeysResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyEntryRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyState;
+
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaEndpoint {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaEndpoint.class);
+
+ private static final String KEY_CUST = "keyCust";
+ private static final String KEY_NAMESPACE = "keyNamespace";
+ private static final String KEY_METADATA1 = "keyMetadata1";
+ private static final String KEY_METADATA2 = "keyMetadata2";
+
+ @Mock
+ private RpcController controller;
+ @Mock
+ private MasterServices master;
+ @Mock
+  private RpcCallback<ManagedKeyResponse> enableKeyManagementDone;
+  @Mock
+  private RpcCallback<GetManagedKeysResponse> getManagedKeysDone;
+  @Mock
+  private RpcCallback<ManagedKeyResponse> disableKeyManagementDone;
+  @Mock
+  private RpcCallback<ManagedKeyResponse> disableManagedKeyDone;
+  @Mock
+  private RpcCallback<ManagedKeyResponse> rotateManagedKeyDone;
+  @Mock
+  private RpcCallback<EmptyMsg> refreshManagedKeysDone;
+
+ KeymetaServiceEndpoint keymetaServiceEndpoint;
+ private ManagedKeyResponse.Builder responseBuilder;
+ private ManagedKeyRequest.Builder requestBuilder;
+ private KeymetaAdminServiceImpl keyMetaAdminService;
+ private ManagedKeyData keyData1;
+ private ManagedKeyData keyData2;
+
+ @Mock
+ private KeymetaAdmin keymetaAdmin;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ keymetaServiceEndpoint = new KeymetaServiceEndpoint();
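+    // The mocked environment also implements HasMasterServices, which the endpoint
+    // relies on at start() to reach the MasterServices instance.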
+ CoprocessorEnvironment env =
+ mock(CoprocessorEnvironment.class, withSettings().extraInterfaces(HasMasterServices.class));
+ when(((HasMasterServices) env).getMasterServices()).thenReturn(master);
+ keymetaServiceEndpoint.start(env);
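+    // The endpoint registers exactly one RPC service; grab its implementation so tests
+    // can invoke it directly.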
+ keyMetaAdminService =
+ (KeymetaAdminServiceImpl) keymetaServiceEndpoint.getServices().iterator().next();
+ responseBuilder = ManagedKeyResponse.newBuilder().setKeyState(ManagedKeyState.KEY_ACTIVE);
+ requestBuilder =
+ ManagedKeyRequest.newBuilder().setKeyNamespace(ManagedKeyData.KEY_SPACE_GLOBAL);
+ keyData1 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE,
+ new SecretKeySpec("key1".getBytes(), "AES"), ACTIVE, KEY_METADATA1);
+ keyData2 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE,
+ new SecretKeySpec("key2".getBytes(), "AES"), ACTIVE, KEY_METADATA2);
+ when(master.getKeymetaAdmin()).thenReturn(keymetaAdmin);
+ }
+
+ @Test
+ public void testCreateResponseBuilderValid() throws IOException {
+ byte[] cust = "testKey".getBytes();
+ ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.copyFrom(cust)).build();
+
+ ManagedKeyResponse.Builder result = ManagedKeyResponse.newBuilder();
+ KeymetaServiceEndpoint.initManagedKeyResponseBuilder(controller, request, result);
+
+ assertNotNull(result);
+ assertArrayEquals(cust, result.getKeyCust().toByteArray());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testCreateResponseBuilderEmptyCust() throws IOException {
+ ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.EMPTY).build();
+
+ IOException exception = assertThrows(IOException.class, () -> KeymetaServiceEndpoint
+ .initManagedKeyResponseBuilder(controller, request, ManagedKeyResponse.newBuilder()));
+
+ assertEquals("key_cust must not be empty", exception.getMessage());
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse() throws Exception {
+ // Arrange
+ ManagedKeyResponse response =
+ responseBuilder.setKeyCust(ByteString.copyFrom(keyData1.getKeyCustodian()))
+ .setKeyNamespace(keyData1.getKeyNamespace()).build();
+    List<ManagedKeyData> managedKeyStates = Arrays.asList(keyData1, keyData2);
+
+ // Act
+ GetManagedKeysResponse result =
+ KeymetaServiceEndpoint.generateKeyStateResponse(managedKeyStates, responseBuilder);
+
+ // Assert
+ assertNotNull(response);
+ assertNotNull(result.getStateList());
+ assertEquals(2, result.getStateList().size());
+ assertEquals(ManagedKeyState.KEY_ACTIVE, result.getStateList().get(0).getKeyState());
+ assertEquals(0, Bytes.compareTo(keyData1.getKeyCustodian(),
+ result.getStateList().get(0).getKeyCust().toByteArray()));
+ assertEquals(keyData1.getKeyNamespace(), result.getStateList().get(0).getKeyNamespace());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_Empty() throws Exception {
+ // Arrange
+ ManagedKeyResponse response =
+ responseBuilder.setKeyCust(ByteString.copyFrom(keyData1.getKeyCustodian()))
+ .setKeyNamespace(keyData1.getKeyNamespace()).build();
+    List<ManagedKeyData> managedKeyStates = new ArrayList<>();
+
+ // Act
+ GetManagedKeysResponse result =
+ KeymetaServiceEndpoint.generateKeyStateResponse(managedKeyStates, responseBuilder);
+
+ // Assert
+ assertNotNull(response);
+ assertNotNull(result.getStateList());
+ assertEquals(0, result.getStateList().size());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+  public void testEnableKeyManagement_Success() throws Exception {
+ doTestServiceCallForSuccess((controller, request, done) -> keyMetaAdminService
+ .enableKeyManagement(controller, request, done), enableKeyManagementDone);
+ }
+
+ @Test
+ public void testGetManagedKeys_Success() throws Exception {
+ doTestServiceCallForSuccess(
+ (controller, request, done) -> keyMetaAdminService.getManagedKeys(controller, request, done),
+ getManagedKeysDone);
+ }
+
+  private <T> void doTestServiceCallForSuccess(ServiceCall<T> svc, RpcCallback<T> done)
+ throws Exception {
+ // Arrange
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+ when(keymetaAdmin.enableKeyManagement(any(), any())).thenReturn(keyData1);
+
+ // Act
+ svc.call(controller, request, done);
+
+ // Assert
+ verify(done).run(any());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+  private interface ServiceCall<T> {
+    void call(RpcController controller, ManagedKeyRequest request, RpcCallback<T> done)
+ throws Exception;
+ }
+
+ @Test
+  public void testEnableKeyManagement_InvalidCust() throws Exception {
+ // Arrange
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.EMPTY).setKeyNamespace(KEY_NAMESPACE).build();
+
+ // Act
+ keyMetaAdminService.enableKeyManagement(controller, request, enableKeyManagementDone);
+
+ // Assert
+ verify(controller).setFailed(contains("key_cust must not be empty"));
+ verify(keymetaAdmin, never()).enableKeyManagement(any(), any());
+ verify(enableKeyManagementDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+  public void testEnableKeyManagement_IOException() throws Exception {
+ // Arrange
+ when(keymetaAdmin.enableKeyManagement(any(), any())).thenThrow(IOException.class);
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+
+ // Act
+ keyMetaAdminService.enableKeyManagement(controller, request, enableKeyManagementDone);
+
+ // Assert
+ verify(controller).setFailed(contains("IOException"));
+ verify(keymetaAdmin).enableKeyManagement(any(), any());
+ verify(enableKeyManagementDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testGetManagedKeys_IOException() throws Exception {
+ doTestGetManagedKeysError(IOException.class);
+ }
+
+ @Test
+ public void testGetManagedKeys_KeyException() throws Exception {
+ doTestGetManagedKeysError(KeyException.class);
+ }
+
+  private void doTestGetManagedKeysError(Class<? extends Exception> exType) throws Exception {
+ // Arrange
+ when(keymetaAdmin.getManagedKeys(any(), any())).thenThrow(exType);
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+
+ // Act
+ keyMetaAdminService.getManagedKeys(controller, request, getManagedKeysDone);
+
+ // Assert
+ verify(controller).setFailed(contains(exType.getSimpleName()));
+ verify(keymetaAdmin).getManagedKeys(any(), any());
+ verify(getManagedKeysDone).run(GetManagedKeysResponse.getDefaultInstance());
+ }
+
+ @Test
+ public void testGetManagedKeys_InvalidCust() throws Exception {
+ // Arrange
+ ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.EMPTY).build();
+
+ keyMetaAdminService.getManagedKeys(controller, request, getManagedKeysDone);
+
+ verify(controller).setFailed(contains("key_cust must not be empty"));
+ verify(keymetaAdmin, never()).getManagedKeys(any(), any());
+ verify(getManagedKeysDone).run(argThat(response -> response.getStateList().isEmpty()));
+ }
+
+ @Test
+ public void testDisableKeyManagement_Success() throws Exception {
+ // Arrange
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+ ManagedKeyData disabledKey = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE, DISABLED);
+ when(keymetaAdmin.disableKeyManagement(any(), any())).thenReturn(disabledKey);
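+    // A key disabled at the management level is surfaced as KEY_INACTIVE in the response.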
+ // Act
+ keyMetaAdminService.disableKeyManagement(controller, request, disableKeyManagementDone);
+
+ // Assert
+ verify(controller, never()).setFailed(anyString());
+ verify(disableKeyManagementDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_INACTIVE));
+ }
+
+ @Test
+ public void testDisableKeyManagement_IOException() throws Exception {
+ doTestDisableKeyManagementError(IOException.class);
+ }
+
+ @Test
+ public void testDisableKeyManagement_KeyException() throws Exception {
+ doTestDisableKeyManagementError(KeyException.class);
+ }
+
+  private void doTestDisableKeyManagementError(Class<? extends Exception> exType) throws Exception {
+ // Arrange
+ when(keymetaAdmin.disableKeyManagement(any(), any())).thenThrow(exType);
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+
+ // Act
+ keyMetaAdminService.disableKeyManagement(controller, request, disableKeyManagementDone);
+
+ // Assert
+ verify(controller).setFailed(contains(exType.getSimpleName()));
+ verify(keymetaAdmin).disableKeyManagement(any(), any());
+ verify(disableKeyManagementDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testDisableKeyManagement_InvalidCust() throws Exception {
+ // Arrange
+ ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.EMPTY).build();
+
+ keyMetaAdminService.disableKeyManagement(controller, request, disableKeyManagementDone);
+
+ verify(controller).setFailed(contains("key_cust must not be empty"));
+ verify(keymetaAdmin, never()).disableKeyManagement(any(), any());
+ verify(disableKeyManagementDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testDisableKeyManagement_InvalidNamespace() throws Exception {
+ // Arrange
+ ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes()))
+ .setKeyNamespace("").build();
+
+ keyMetaAdminService.disableKeyManagement(controller, request, disableKeyManagementDone);
+
+ verify(controller).setFailed(contains("key_namespace must not be empty"));
+ verify(keymetaAdmin, never()).disableKeyManagement(any(), any());
+ verify(disableKeyManagementDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testDisableManagedKey_Success() throws Exception {
+ // Arrange
+ ManagedKeyEntryRequest request = ManagedKeyEntryRequest.newBuilder()
+ .setKeyCustNs(requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build())
+ .setKeyMetadataHash(ByteString.copyFrom(keyData1.getKeyMetadataHash())).build();
+ when(keymetaAdmin.disableManagedKey(any(), any(), any())).thenReturn(keyData1);
+
+ // Act
+ keyMetaAdminService.disableManagedKey(controller, request, disableManagedKeyDone);
+
+ // Assert
+ verify(disableManagedKeyDone).run(any());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testDisableManagedKey_IOException() throws Exception {
+ doTestDisableManagedKeyError(IOException.class);
+ }
+
+ @Test
+ public void testDisableManagedKey_KeyException() throws Exception {
+ doTestDisableManagedKeyError(KeyException.class);
+ }
+
+  private void doTestDisableManagedKeyError(Class<? extends Exception> exType) throws Exception {
+ // Arrange
+ when(keymetaAdmin.disableManagedKey(any(), any(), any())).thenThrow(exType);
+ ManagedKeyEntryRequest request = ManagedKeyEntryRequest.newBuilder()
+ .setKeyCustNs(requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build())
+ .setKeyMetadataHash(ByteString.copyFrom(keyData1.getKeyMetadataHash())).build();
+
+ // Act
+ keyMetaAdminService.disableManagedKey(controller, request, disableManagedKeyDone);
+
+ // Assert
+ verify(controller).setFailed(contains(exType.getSimpleName()));
+ verify(keymetaAdmin).disableManagedKey(any(), any(), any());
+ verify(disableManagedKeyDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testDisableManagedKey_InvalidCust() throws Exception {
+ // Arrange
+ ManagedKeyEntryRequest request = ManagedKeyEntryRequest.newBuilder()
+ .setKeyCustNs(
+ requestBuilder.setKeyCust(ByteString.EMPTY).setKeyNamespace(KEY_NAMESPACE).build())
+ .setKeyMetadataHash(ByteString.copyFrom(keyData1.getKeyMetadataHash())).build();
+
+ keyMetaAdminService.disableManagedKey(controller, request, disableManagedKeyDone);
+
+ verify(controller).setFailed(contains("key_cust must not be empty"));
+ verify(keymetaAdmin, never()).disableManagedKey(any(), any(), any());
+ verify(disableManagedKeyDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testDisableManagedKey_InvalidNamespace() throws Exception {
+ // Arrange
+ ManagedKeyEntryRequest request = ManagedKeyEntryRequest.newBuilder()
+ .setKeyCustNs(requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes()))
+ .setKeyNamespace("").build())
+ .setKeyMetadataHash(ByteString.copyFrom(keyData1.getKeyMetadataHash())).build();
+
+ keyMetaAdminService.disableManagedKey(controller, request, disableManagedKeyDone);
+
+ verify(controller).setFailed(contains("key_namespace must not be empty"));
+ verify(keymetaAdmin, never()).disableManagedKey(any(), any(), any());
+ verify(disableManagedKeyDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testRotateManagedKey_Success() throws Exception {
+ // Arrange
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+ when(keymetaAdmin.rotateManagedKey(any(), any())).thenReturn(keyData1);
+
+ // Act
+ keyMetaAdminService.rotateManagedKey(controller, request, rotateManagedKeyDone);
+
+ // Assert
+ verify(rotateManagedKeyDone).run(any());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testRotateManagedKey_IOException() throws Exception {
+ doTestRotateManagedKeyError(IOException.class);
+ }
+
+ @Test
+ public void testRotateManagedKey_KeyException() throws Exception {
+ doTestRotateManagedKeyError(KeyException.class);
+ }
+
+  private void doTestRotateManagedKeyError(Class<? extends Exception> exType) throws Exception {
+ // Arrange
+ when(keymetaAdmin.rotateManagedKey(any(), any())).thenThrow(exType);
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+
+ // Act
+ keyMetaAdminService.rotateManagedKey(controller, request, rotateManagedKeyDone);
+
+ // Assert
+ verify(controller).setFailed(contains(exType.getSimpleName()));
+ verify(keymetaAdmin).rotateManagedKey(any(), any());
+ verify(rotateManagedKeyDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testRotateManagedKey_InvalidCust() throws Exception {
+ // Arrange
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.EMPTY).setKeyNamespace(KEY_NAMESPACE).build();
+
+ keyMetaAdminService.rotateManagedKey(controller, request, rotateManagedKeyDone);
+
+ verify(controller).setFailed(contains("key_cust must not be empty"));
+ verify(keymetaAdmin, never()).rotateManagedKey(any(), any());
+ verify(rotateManagedKeyDone)
+ .run(argThat(response -> response.getKeyState() == ManagedKeyState.KEY_FAILED));
+ }
+
+ @Test
+ public void testRefreshManagedKeys_Success() throws Exception {
+ // Arrange
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+
+ // Act
+ keyMetaAdminService.refreshManagedKeys(controller, request, refreshManagedKeysDone);
+
+ // Assert
+ verify(refreshManagedKeysDone).run(any());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testRefreshManagedKeys_IOException() throws Exception {
+ doTestRefreshManagedKeysError(IOException.class);
+ }
+
+ @Test
+ public void testRefreshManagedKeys_KeyException() throws Exception {
+ doTestRefreshManagedKeysError(KeyException.class);
+ }
+
+  private void doTestRefreshManagedKeysError(Class<? extends Exception> exType) throws Exception {
+ // Arrange
+ Mockito.doThrow(exType).when(keymetaAdmin).refreshManagedKeys(any(), any());
+ ManagedKeyRequest request =
+ requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build();
+
+ // Act
+ keyMetaAdminService.refreshManagedKeys(controller, request, refreshManagedKeysDone);
+
+ // Assert
+ verify(controller).setFailed(contains(exType.getSimpleName()));
+ verify(keymetaAdmin).refreshManagedKeys(any(), any());
+ verify(refreshManagedKeysDone).run(EmptyMsg.getDefaultInstance());
+ }
+
+ @Test
+ public void testRefreshManagedKeys_InvalidCust() throws Exception {
+ // Arrange
+ ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.EMPTY).build();
+
+ keyMetaAdminService.refreshManagedKeys(controller, request, refreshManagedKeysDone);
+
+ verify(controller).setFailed(contains("key_cust must not be empty"));
+ verify(keymetaAdmin, never()).refreshManagedKeys(any(), any());
+ verify(refreshManagedKeysDone).run(EmptyMsg.getDefaultInstance());
+ }
+
+ @Test
+ public void testRefreshManagedKeys_InvalidNamespace() throws Exception {
+ // Arrange
+ ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes()))
+ .setKeyNamespace("").build();
+
+ // Act
+ keyMetaAdminService.refreshManagedKeys(controller, request, refreshManagedKeysDone);
+
+ // Assert
+ verify(controller).setFailed(contains("key_namespace must not be empty"));
+ verify(keymetaAdmin, never()).refreshManagedKeys(any(), any());
+ verify(refreshManagedKeysDone).run(EmptyMsg.getDefaultInstance());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
new file mode 100644
index 000000000000..fde1d81481c1
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
@@ -0,0 +1,591 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE_DISABLED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE_DISABLED;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_CHECKSUM_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_METADATA_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_WRAPPED_BY_STK_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.KEY_META_INFO_FAMILY;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.KEY_STATE_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.REFRESHED_TIMESTAMP_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.STK_CHECKSUM_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.constructRowKeyForCustNamespace;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.constructRowKeyForMetadata;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.parseFromResult;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Suite;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Captor;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestKeymetaTableAccessor.TestAdd.class,
+ TestKeymetaTableAccessor.TestAddWithNullableFields.class, TestKeymetaTableAccessor.TestGet.class,
+ TestKeymetaTableAccessor.TestDisableKey.class,
+ TestKeymetaTableAccessor.TestUpdateActiveState.class, })
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaTableAccessor {
+ protected static final String ALIAS = "custId1";
+ protected static final byte[] CUST_ID = ALIAS.getBytes();
+ protected static final String KEY_NAMESPACE = "namespace";
+ protected static String KEY_METADATA = "metadata1";
+
+ @Mock
+ protected MasterServices server;
+ @Mock
+ protected Connection connection;
+ @Mock
+ protected Table table;
+ @Mock
+ protected ResultScanner scanner;
+ @Mock
+ protected SystemKeyCache systemKeyCache;
+ @Mock
+ protected KeyManagementService keyManagementService;
+
+ protected KeymetaTableAccessor accessor;
+ protected Configuration conf = HBaseConfiguration.create();
+ protected MockManagedKeyProvider managedKeyProvider;
+ protected ManagedKeyData latestSystemKey;
+
+ private AutoCloseable closeableMocks;
+
+ @Before
+ public void setUp() throws Exception {
+ closeableMocks = MockitoAnnotations.openMocks(this);
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
+
+ when(server.getConnection()).thenReturn(connection);
+ when(connection.getTable(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(table);
+ when(server.getSystemKeyCache()).thenReturn(systemKeyCache);
+ when(server.getConfiguration()).thenReturn(conf);
+ when(server.getKeyManagementService()).thenReturn(keyManagementService);
+ when(keyManagementService.getConfiguration()).thenReturn(conf);
+ when(keyManagementService.getSystemKeyCache()).thenReturn(systemKeyCache);
+
+ accessor = new KeymetaTableAccessor(server);
+ managedKeyProvider = new MockManagedKeyProvider();
+ managedKeyProvider.initConfig(conf, "");
+
+ latestSystemKey = managedKeyProvider.getSystemKey("system-id".getBytes());
+ when(systemKeyCache.getLatestSystemKey()).thenReturn(latestSystemKey);
+ when(systemKeyCache.getSystemKeyByChecksum(anyLong())).thenReturn(latestSystemKey);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ closeableMocks.close();
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAdd extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdd.class);
+
+ @Parameter(0)
+ public ManagedKeyState keyState;
+
+ @Parameterized.Parameters(name = "{index},keyState={0}")
+    public static Collection<Object[]> data() {
+ return Arrays.asList(new Object[][] { { ACTIVE }, { FAILED }, { INACTIVE }, { DISABLED }, });
+ }
+
+ @Captor
+    private ArgumentCaptor<List<Put>> putCaptor;
+
+ @Test
+ public void testAddKey() throws Exception {
+ managedKeyProvider.setMockedKeyState(ALIAS, keyState);
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+
+ accessor.addKey(keyData);
+
+ verify(table).put(putCaptor.capture());
+      List<Put> puts = putCaptor.getValue();
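+      // An ACTIVE key is written under both the cust/namespace row and the metadata row;
+      // every other state writes only the metadata row.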
+ assertEquals(keyState == ACTIVE ? 2 : 1, puts.size());
+ if (keyState == ACTIVE) {
+ assertPut(keyData, puts.get(0), constructRowKeyForCustNamespace(keyData), ACTIVE);
+ assertPut(keyData, puts.get(1), constructRowKeyForMetadata(keyData), ACTIVE);
+ } else {
+ assertPut(keyData, puts.get(0), constructRowKeyForMetadata(keyData), keyState);
+ }
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAddWithNullableFields extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAddWithNullableFields.class);
+
+ @Captor
+    private ArgumentCaptor<List<Mutation>> batchCaptor;
+
+ @Test
+ public void testAddKeyManagementStateMarker() throws Exception {
+ managedKeyProvider.setMockedKeyState(ALIAS, FAILED);
+ ManagedKeyData keyData = new ManagedKeyData(CUST_ID, KEY_SPACE_GLOBAL, FAILED);
+
+ accessor.addKeyManagementStateMarker(keyData.getKeyCustodian(), keyData.getKeyNamespace(),
+ keyData.getKeyState());
+
+ verify(table).batch(batchCaptor.capture(), any());
+      List<Mutation> mutations = batchCaptor.getValue();
+ assertEquals(2, mutations.size());
+ Mutation mutation1 = mutations.get(0);
+ Mutation mutation2 = mutations.get(1);
+ assertTrue(mutation1 instanceof Put);
+ assertTrue(mutation2 instanceof Delete);
+ Put put = (Put) mutation1;
+ Delete delete = (Delete) mutation2;
+
+ // Verify the row key uses state value for metadata hash
+ byte[] expectedRowKey = constructRowKeyForCustNamespace(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(0, Bytes.compareTo(expectedRowKey, put.getRow()));
+
+      Map<Bytes, Bytes> valueMap = getValueMap(put);
+
+ // Verify key-related columns are not present
+ assertNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES)));
+ assertNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES)));
+ assertNull(valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES)));
+
+ assertEquals(Durability.SKIP_WAL, put.getDurability());
+ assertEquals(HConstants.SYSTEMTABLE_QOS, put.getPriority());
+
+ // Verify state is set correctly
+ assertEquals(new Bytes(new byte[] { FAILED.getVal() }),
+ valueMap.get(new Bytes(KEY_STATE_QUAL_BYTES)));
+
+ // Verify the delete operation properties
+ assertEquals(Durability.SKIP_WAL, delete.getDurability());
+ assertEquals(HConstants.SYSTEMTABLE_QOS, delete.getPriority());
+
+ // Verify the row key is correct for a failure marker
+ assertEquals(0, Bytes.compareTo(expectedRowKey, delete.getRow()));
+ // Verify the key checksum, wrapped key, and STK checksum columns are deleted
+ assertDeleteColumns(delete);
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestGet extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGet.class);
+
+ @Mock
+ private Result result1;
+ @Mock
+ private Result result2;
+
+ private String keyMetadata2 = "metadata2";
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+
+ when(result1.isEmpty()).thenReturn(false);
+ when(result2.isEmpty()).thenReturn(false);
+ when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES)))
+ .thenReturn(new byte[] { ACTIVE.getVal() });
+ when(result2.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES)))
+ .thenReturn(new byte[] { FAILED.getVal() });
+ for (Result result : Arrays.asList(result1, result2)) {
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(REFRESHED_TIMESTAMP_QUAL_BYTES)))
+ .thenReturn(Bytes.toBytes(0L));
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(STK_CHECKSUM_QUAL_BYTES)))
+ .thenReturn(Bytes.toBytes(0L));
+ }
+ when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES)))
+ .thenReturn(KEY_METADATA.getBytes());
+ when(result2.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES)))
+ .thenReturn(keyMetadata2.getBytes());
+ }
+
+ @Test
+ public void testParseEmptyResult() throws Exception {
+ Result result = mock(Result.class);
+ when(result.isEmpty()).thenReturn(true);
+
+ assertNull(parseFromResult(server, CUST_ID, KEY_NAMESPACE, null));
+ assertNull(parseFromResult(server, CUST_ID, KEY_NAMESPACE, result));
+ }
+
+ @Test
+ public void testGetActiveKeyMissingWrappedKey() throws Exception {
+ Result result = mock(Result.class);
+ when(table.get(any(Get.class))).thenReturn(result);
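+      // Consecutive stubbing: the first lookup reports an ACTIVE key, the second an
+      // INACTIVE one, with no wrapped key present in either case.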
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES)))
+ .thenReturn(new byte[] { ACTIVE.getVal() }, new byte[] { INACTIVE.getVal() });
+
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash(KEY_METADATA);
+ IOException ex;
+ ex = assertThrows(IOException.class,
+ () -> accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, keyMetadataHash));
+ assertEquals("ACTIVE key must have a wrapped key", ex.getMessage());
+ ex = assertThrows(IOException.class,
+ () -> accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, keyMetadataHash));
+ assertEquals("INACTIVE key must have a wrapped key", ex.getMessage());
+ }
+
+ @Test
+ public void testGetKeyMissingSTK() throws Exception {
+ when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_WRAPPED_BY_STK_QUAL_BYTES)))
+ .thenReturn(new byte[] { 0 });
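+      // An unresolvable STK checksum means the stored DEK cannot be unwrapped, so getKey()
+      // is expected to return null.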
+ when(systemKeyCache.getSystemKeyByChecksum(anyLong())).thenReturn(null);
+ when(table.get(any(Get.class))).thenReturn(result1);
+
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash(KEY_METADATA);
+ ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, keyMetadataHash);
+
+ assertNull(result);
+ }
+
+ @Test
+ public void testGetKeyWithWrappedKey() throws Exception {
+ ManagedKeyData keyData = setupActiveKey(CUST_ID, result1);
+
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash(keyData.getKeyMetadata());
+ ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, keyMetadataHash);
+
+ verify(table).get(any(Get.class));
+ assertNotNull(result);
+ assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian()));
+ assertEquals(KEY_NAMESPACE, result.getKeyNamespace());
+ assertEquals(keyData.getKeyMetadata(), result.getKeyMetadata());
+ assertEquals(0,
+ Bytes.compareTo(keyData.getTheKey().getEncoded(), result.getTheKey().getEncoded()));
+ assertEquals(ACTIVE, result.getKeyState());
+
+ // When DEK checksum doesn't match, we expect a null value.
+ result = accessor.getKey(CUST_ID, KEY_NAMESPACE, keyMetadataHash);
+ assertNull(result);
+ }
+
+ @Test
+ public void testGetKeyWithoutWrappedKey() throws Exception {
+ when(table.get(any(Get.class))).thenReturn(result2);
+
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash(keyMetadata2);
+ ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, keyMetadataHash);
+
+ verify(table).get(any(Get.class));
+ assertNotNull(result);
+ assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian()));
+ assertEquals(KEY_NAMESPACE, result.getKeyNamespace());
+ assertEquals(keyMetadata2, result.getKeyMetadata());
+ assertNull(result.getTheKey());
+ assertEquals(FAILED, result.getKeyState());
+ }
+
+ @Test
+ public void testGetAllKeys() throws Exception {
+ ManagedKeyData keyData = setupActiveKey(CUST_ID, result1);
+
+ when(scanner.iterator()).thenReturn(List.of(result1, result2).iterator());
+ when(table.getScanner(any(Scan.class))).thenReturn(scanner);
+
+      List<ManagedKeyData> allKeys = accessor.getAllKeys(CUST_ID, KEY_NAMESPACE, true);
+
+ assertEquals(2, allKeys.size());
+ assertEquals(keyData.getKeyMetadata(), allKeys.get(0).getKeyMetadata());
+ assertEquals(keyMetadata2, allKeys.get(1).getKeyMetadata());
+ verify(table).getScanner(any(Scan.class));
+ }
+
+ @Test
+ public void testGetActiveKey() throws Exception {
+ ManagedKeyData keyData = setupActiveKey(CUST_ID, result1);
+
+ when(scanner.iterator()).thenReturn(List.of(result1).iterator());
+ when(table.get(any(Get.class))).thenReturn(result1);
+
+ ManagedKeyData activeKey = accessor.getKeyManagementStateMarker(CUST_ID, KEY_NAMESPACE);
+
+ assertNotNull(activeKey);
+ assertEquals(keyData, activeKey);
+ verify(table).get(any(Get.class));
+ }
+
+ private ManagedKeyData setupActiveKey(byte[] custId, Result result) throws Exception {
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(custId, KEY_NAMESPACE);
+ byte[] dekWrappedBySTK =
+ EncryptionUtil.wrapKey(conf, null, keyData.getTheKey(), latestSystemKey.getTheKey());
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_WRAPPED_BY_STK_QUAL_BYTES)))
+ .thenReturn(dekWrappedBySTK);
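+      // Consecutive stubbing: the first read sees the matching DEK checksum, the second a
+      // mismatching 0L (exercised by testGetKeyWithWrappedKey).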
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_CHECKSUM_QUAL_BYTES)))
+ .thenReturn(Bytes.toBytes(keyData.getKeyChecksum()), Bytes.toBytes(0L));
+ // Update the mock to return the correct metadata from the keyData
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES)))
+ .thenReturn(keyData.getKeyMetadata().getBytes());
+ when(table.get(any(Get.class))).thenReturn(result);
+ return keyData;
+ }
+ }
+
+ protected void assertPut(ManagedKeyData keyData, Put put, byte[] rowKey,
+ ManagedKeyState targetState) {
+ assertEquals(Durability.SKIP_WAL, put.getDurability());
+ assertEquals(HConstants.SYSTEMTABLE_QOS, put.getPriority());
+ assertTrue(Bytes.compareTo(rowKey, put.getRow()) == 0);
+
+    Map<Bytes, Bytes> valueMap = getValueMap(put);
+
+ if (keyData.getTheKey() != null) {
+ assertNotNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES)));
+ assertNotNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES)));
+ assertEquals(new Bytes(Bytes.toBytes(latestSystemKey.getKeyChecksum())),
+ valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES)));
+ } else {
+ assertNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES)));
+ assertNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES)));
+ assertNull(valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES)));
+ }
+ assertEquals(new Bytes(keyData.getKeyMetadata().getBytes()),
+ valueMap.get(new Bytes(DEK_METADATA_QUAL_BYTES)));
+ assertNotNull(valueMap.get(new Bytes(REFRESHED_TIMESTAMP_QUAL_BYTES)));
+ assertEquals(new Bytes(new byte[] { targetState.getVal() }),
+ valueMap.get(new Bytes(KEY_STATE_QUAL_BYTES)));
+ }
+
+ // Verify the key checksum, wrapped key, and STK checksum columns are deleted
+ private static void assertDeleteColumns(Delete delete) {
+    Map<byte[], List<Cell>> familyCellMap = delete.getFamilyCellMap();
+ assertTrue(familyCellMap.containsKey(KEY_META_INFO_FAMILY));
+
+    List<Cell> cells = familyCellMap.get(KEY_META_INFO_FAMILY);
+ assertEquals(3, cells.size());
+
+ // Verify each column is present in the delete
+    Set<byte[]> qualifiers =
+ cells.stream().map(CellUtil::cloneQualifier).collect(Collectors.toSet());
+
+ assertTrue(qualifiers.stream().anyMatch(q -> Bytes.equals(q, DEK_CHECKSUM_QUAL_BYTES)));
+ assertTrue(qualifiers.stream().anyMatch(q -> Bytes.equals(q, DEK_WRAPPED_BY_STK_QUAL_BYTES)));
+ assertTrue(qualifiers.stream().anyMatch(q -> Bytes.equals(q, STK_CHECKSUM_QUAL_BYTES)));
+ }
+
+  private static Map<Bytes, Bytes> getValueMap(Mutation mutation) {
+    NavigableMap<byte[], List<Cell>> familyCellMap = mutation.getFamilyCellMap();
+    List<Cell> cells = familyCellMap.get(KEY_META_INFO_FAMILY);
+    Map<Bytes, Bytes> valueMap = new HashMap<>();
+ for (Cell cell : cells) {
+ valueMap.put(
+ new Bytes(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()),
+ new Bytes(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+ }
+ return valueMap;
+ }
+
+ /**
+ * Tests for disableKey() method.
+ */
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestDisableKey extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestDisableKey.class);
+
+ // Parameterize the key state
+ @Parameter(0)
+ public ManagedKeyState keyState;
+
+ @Captor
+    private ArgumentCaptor<List<Mutation>> mutationsCaptor;
+
+ @Parameterized.Parameters(name = "{index},keyState={0}")
+    public static Collection<Object[]> data() {
+ return Arrays.asList(new Object[][] { { ACTIVE }, { INACTIVE }, { ACTIVE_DISABLED },
+ { INACTIVE_DISABLED }, { FAILED }, });
+ }
+
+ @Test
+ public void testDisableKey() throws Exception {
+ ManagedKeyData keyData =
+ new ManagedKeyData(CUST_ID, KEY_NAMESPACE, null, keyState, "testMetadata");
+
+ accessor.disableKey(keyData);
+
+ verify(table).batch(mutationsCaptor.capture(), any());
+      List<Mutation> mutations = mutationsCaptor.getValue();
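+      // Mutation counts vary by starting state: ACTIVE also touches the cust/namespace row,
+      // INACTIVE additionally deletes the key-material columns, and all other states update
+      // only the metadata row.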
+ assertEquals(keyState == ACTIVE ? 3 : keyState == INACTIVE ? 2 : 1, mutations.size());
+ int putIndex = 0;
+ ManagedKeyState targetState = keyState == ACTIVE ? ACTIVE_DISABLED : INACTIVE_DISABLED;
+ if (keyState == ACTIVE) {
+ assertTrue(
+ Bytes.compareTo(constructRowKeyForCustNamespace(keyData), mutations.get(0).getRow())
+ == 0);
+ ++putIndex;
+ }
+ assertPut(keyData, (Put) mutations.get(putIndex), constructRowKeyForMetadata(keyData),
+ targetState);
+ if (keyState == INACTIVE) {
+ assertTrue(
+ Bytes.compareTo(constructRowKeyForMetadata(keyData), mutations.get(putIndex + 1).getRow())
+ == 0);
+ // Verify the key checksum, wrapped key, and STK checksum columns are deleted
+ assertDeleteColumns((Delete) mutations.get(putIndex + 1));
+ }
+ }
+ }
+
+ /**
+ * Tests for updateActiveState() method.
+ */
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestUpdateActiveState extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestUpdateActiveState.class);
+
+ @Captor
+    private ArgumentCaptor<List<Mutation>> mutationsCaptor;
+
+ @Test
+ public void testUpdateActiveStateFromInactiveToActive() throws Exception {
+ ManagedKeyData keyData =
+ new ManagedKeyData(CUST_ID, KEY_NAMESPACE, null, INACTIVE, "metadata", 123L);
+ ManagedKeyData systemKey =
+ new ManagedKeyData(new byte[] { 1 }, KEY_SPACE_GLOBAL, null, ACTIVE, "syskey", 100L);
+ when(systemKeyCache.getLatestSystemKey()).thenReturn(systemKey);
+
+ accessor.updateActiveState(keyData, ACTIVE);
+
+ verify(table).batch(mutationsCaptor.capture(), any());
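+      // Promoting an INACTIVE key back to ACTIVE is expected to rewrite both the
+      // cust/namespace row and the metadata row.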
+      List<Mutation> mutations = mutationsCaptor.getValue();
+ assertEquals(2, mutations.size());
+ }
+
+ @Test
+ public void testUpdateActiveStateFromActiveToInactive() throws Exception {
+ ManagedKeyData keyData =
+ new ManagedKeyData(CUST_ID, KEY_NAMESPACE, null, ACTIVE, "metadata", 123L);
+
+ accessor.updateActiveState(keyData, INACTIVE);
+
+ verify(table).batch(mutationsCaptor.capture(), any());
+      List<Mutation> mutations = mutationsCaptor.getValue();
+ assertEquals(2, mutations.size());
+ }
+
+ @Test
+ public void testUpdateActiveStateNoOp() throws Exception {
+ ManagedKeyData keyData =
+ new ManagedKeyData(CUST_ID, KEY_NAMESPACE, null, ACTIVE, "metadata", 123L);
+
+ accessor.updateActiveState(keyData, ACTIVE);
+
+ verify(table, Mockito.never()).batch(any(), any());
+ }
+
+ @Test
+ public void testUpdateActiveStateFromDisabledToActive() throws Exception {
+ ManagedKeyData keyData =
+ new ManagedKeyData(CUST_ID, KEY_NAMESPACE, null, DISABLED, "metadata", 123L);
+ ManagedKeyData systemKey =
+ new ManagedKeyData(new byte[] { 1 }, KEY_SPACE_GLOBAL, null, ACTIVE, "syskey", 100L);
+ when(systemKeyCache.getLatestSystemKey()).thenReturn(systemKey);
+
+ accessor.updateActiveState(keyData, ACTIVE);
+
+ verify(table).batch(mutationsCaptor.capture(), any());
+      List<Mutation> mutations = mutationsCaptor.getValue();
+ // Should have 2 mutations: add CustNamespace row and add all columns to Metadata row
+ assertEquals(2, mutations.size());
+ }
+
+ @Test
+ public void testUpdateActiveStateInvalidNewState() {
+ ManagedKeyData keyData =
+ new ManagedKeyData(CUST_ID, KEY_NAMESPACE, null, ACTIVE, "metadata", 123L);
+
+ assertThrows(IllegalArgumentException.class,
+ () -> accessor.updateActiveState(keyData, DISABLED));
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
new file mode 100644
index 000000000000..0b00df9e57b6
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
@@ -0,0 +1,862 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.clearInvocations;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+import net.bytebuddy.implementation.bind.annotation.AllArguments;
+import net.bytebuddy.implementation.bind.annotation.Origin;
+import net.bytebuddy.implementation.bind.annotation.RuntimeType;
+import net.bytebuddy.matcher.ElementMatchers;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Suite;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestManagedKeyDataCache.TestGeneric.class,
+ TestManagedKeyDataCache.TestWithoutL2Cache.class,
+ TestManagedKeyDataCache.TestWithL2CacheAndNoDynamicLookup.class,
+ TestManagedKeyDataCache.TestWithL2CacheAndDynamicLookup.class, })
+@Category({ MasterTests.class, SmallTests.class })
+public class TestManagedKeyDataCache {
+ private static final String ALIAS = "cust1";
+ private static final byte[] CUST_ID = ALIAS.getBytes();
+  private static Class<? extends MockManagedKeyProvider> providerClass;
+
+ @Mock
+ private Server server;
+ @Spy
+ protected MockManagedKeyProvider testProvider;
+ protected ManagedKeyDataCache cache;
+ protected Configuration conf = HBaseConfiguration.create();
+
+ public static class ForwardingInterceptor {
+    static ThreadLocal<MockManagedKeyProvider> delegate = new ThreadLocal<>();
+
+ static void setDelegate(MockManagedKeyProvider d) {
+ delegate.set(d);
+ }
+
+ @RuntimeType
+ public Object intercept(@Origin Method method, @AllArguments Object[] args) throws Throwable {
+      // Unwrap the InvocationTargetException raised when the provider throws, so callers
+      // see the original cause. (Not needed when the intercept delegates directly to the spy.)
+ try {
+ return method.invoke(delegate.get(), args); // calls the spy, triggering Mockito
+ } catch (InvocationTargetException e) {
+ throw e.getCause();
+ }
+ }
+ }
+
+ @BeforeClass
+ public static synchronized void setUpInterceptor() {
+ if (providerClass != null) {
+ return;
+ }
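+    // The provider is instantiated by class name from the configuration, so a Mockito spy
+    // cannot be injected directly. Instead, generate a subclass whose methods all forward
+    // to the current thread-local spy.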
+ providerClass = new ByteBuddy().subclass(MockManagedKeyProvider.class)
+ .name("org.apache.hadoop.hbase.io.crypto.MockManagedKeyProviderSpy")
+ .method(ElementMatchers.any()) // Intercept all methods
+      // Delegate through ForwardingInterceptor instead of binding testProvider directly,
+      // so each test can swap in its own spy instance via the thread-local delegate.
+ .intercept(MethodDelegation.to(new ForwardingInterceptor())).make()
+ .load(MockManagedKeyProvider.class.getClassLoader(), ClassLoadingStrategy.Default.INJECTION)
+ .getLoaded();
+ }
+
+ @Before
+ public void setUp() {
+ MockitoAnnotations.openMocks(this);
+ ForwardingInterceptor.setDelegate(testProvider);
+
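+    // Drop any cached provider instance so this test's generated subclass is used.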
+ Encryption.clearKeyProviderCache();
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY, providerClass.getName());
+
+ // Configure the server mock to return the configuration
+ when(server.getConfiguration()).thenReturn(conf);
+
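+    // In multi-key generation mode the mock provider returns a distinct key (and metadata)
+    // on every getManagedKey() call.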
+ testProvider.setMultikeyGenMode(true);
+ }
+
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestGeneric {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestGeneric.class);
+
+ @Test
+ public void testEmptyCache() throws Exception {
+ ManagedKeyDataCache cache = new ManagedKeyDataCache(HBaseConfiguration.create(), null);
+ assertEquals(0, cache.getGenericCacheEntryCount());
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testActiveKeysCacheKeyEqualsAndHashCode() {
+ byte[] custodian1 = new byte[] { 1, 2, 3 };
+ byte[] custodian2 = new byte[] { 1, 2, 3 };
+ byte[] custodian3 = new byte[] { 4, 5, 6 };
+ String namespace1 = "ns1";
+ String namespace2 = "ns2";
+
+ // Reflexive
+ ManagedKeyDataCache.ActiveKeysCacheKey key1 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace1);
+ assertTrue(key1.equals(key1));
+
+ // Symmetric and consistent for equal content
+ ManagedKeyDataCache.ActiveKeysCacheKey key2 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian2, namespace1);
+ assertTrue(key1.equals(key2));
+ assertTrue(key2.equals(key1));
+ assertEquals(key1.hashCode(), key2.hashCode());
+
+ // Different custodian
+ ManagedKeyDataCache.ActiveKeysCacheKey key3 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace1);
+ assertFalse(key1.equals(key3));
+ assertFalse(key3.equals(key1));
+
+ // Different namespace
+ ManagedKeyDataCache.ActiveKeysCacheKey key4 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace2);
+ assertFalse(key1.equals(key4));
+ assertFalse(key4.equals(key1));
+
+ // Null and different class
+ assertFalse(key1.equals(null));
+ assertFalse(key1.equals("not a key"));
+
+ // Both fields different
+ ManagedKeyDataCache.ActiveKeysCacheKey key5 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace2);
+ assertFalse(key1.equals(key5));
+ assertFalse(key5.equals(key1));
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestWithoutL2Cache extends TestManagedKeyDataCache {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestWithoutL2Cache.class);
+
+ @Before
+ public void setUp() {
+ super.setUp();
+ cache = new ManagedKeyDataCache(conf, null);
+ }
+
+ @Test
+ public void testGenericCacheForInvalidMetadata() throws Exception {
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(testProvider).unwrapKey(any(String.class), any());
+ }
+
+ @Test
+ public void testWithInvalidProvider() throws Exception {
+ ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ doThrow(new IOException("Test exception")).when(testProvider).unwrapKey(any(String.class),
+ any());
+ // With no L2 and invalid provider, there will be no entry.
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null));
+ verify(testProvider).unwrapKey(any(String.class), any());
+ clearInvocations(testProvider);
+
+ // A second call to getEntry should not result in a call to the provider due to the
+ // negative entry.
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null));
+ verify(testProvider, never()).unwrapKey(any(String.class), any());
+
+ // Now make getManagedKey() fail as well and verify getActiveEntry() behaves the same way.
+ doThrow(new IOException("Test exception")).when(testProvider).getManagedKey(any(),
+ any(String.class));
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+
+ // A second call to getActiveEntry should not result in a call to the provider due to the
+ // negative entry.
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testGenericCache() throws Exception {
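+ // With multikey mode on, each direct getManagedKey() call yields a new key version;
+ // the cache must hand back the matching version for each version's metadata.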
+ ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(globalKey1,
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ ManagedKeyData globalKey2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(globalKey2,
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey2.getKeyMetadata(), null));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ ManagedKeyData globalKey3 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(globalKey3,
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey3.getKeyMetadata(), null));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCache() throws Exception {
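+ // The first lookup loads the active key via the provider; subsequent lookups must be
+ // served from the active-keys cache without touching the provider.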
+ assertNotNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ ManagedKeyData activeKey = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(activeKey);
+ assertEquals(activeKey, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testGenericCacheOperations() throws Exception {
+ ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ ManagedKeyData nsKey1 = testProvider.getManagedKey(CUST_ID, "namespace1");
+ assertGenericCacheEntries(nsKey1, globalKey1);
+ ManagedKeyData globalKey2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertGenericCacheEntries(globalKey2, nsKey1, globalKey1);
+ ManagedKeyData nsKey2 = testProvider.getManagedKey(CUST_ID, "namespace1");
+ assertGenericCacheEntries(nsKey2, globalKey2, nsKey1, globalKey1);
+ }
+
+ @Test
+ public void testActiveKeyGetNoActive() throws Exception {
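+ // With the key marked FAILED there is no active key; the miss is cached negatively so
+ // the provider is consulted only once.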
+ testProvider.setMockedKeyState(ALIAS, FAILED);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheOperations() throws Exception {
+ assertNotNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertNotNull(cache.getActiveEntry(CUST_ID, "namespace1"));
+ assertEquals(2, cache.getActiveCacheEntryCount());
+
+ cache.clearCache();
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ assertNotNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertEquals(1, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testGenericCacheUsingActiveKeysCacheOverProvider() throws Exception {
+ ManagedKeyData key = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key);
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(testProvider, never()).unwrapKey(any(String.class), any());
+ }
+
+ @Test
+ public void testThatActiveKeysCache_SkipsProvider_WhenLoadedViaGenericCache() throws Exception {
+ ManagedKeyData key1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(key1, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null));
+ ManagedKeyData key2 = testProvider.getManagedKey(CUST_ID, "namespace1");
+ assertEquals(key2, cache.getEntry(CUST_ID, "namespace1", key2.getKeyMetadata(), null));
+ verify(testProvider, times(2)).getManagedKey(any(), any(String.class));
+ assertEquals(2, cache.getActiveCacheEntryCount());
+ clearInvocations(testProvider);
+ assertEquals(key1, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertEquals(key2, cache.getActiveEntry(CUST_ID, "namespace1"));
+ // ACTIVE keys are automatically added to activeKeysCache when loaded
+ // via getEntry, so getActiveEntry will find them there and won't call the provider
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ cache.clearCache();
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testThatNonActiveKey_IsIgnored_WhenLoadedViaGenericCache() throws Exception {
+ testProvider.setMockedKeyState(ALIAS, FAILED);
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ assertEquals(0, cache.getActiveCacheEntryCount());
+
+ testProvider.setMockedKeyState(ALIAS, DISABLED);
+ key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ assertEquals(0, cache.getActiveCacheEntryCount());
+
+ testProvider.setMockedKeyState(ALIAS, INACTIVE);
+ key = testProvider.getManagedKey(CUST_ID, "namespace1");
+ assertEquals(key, cache.getEntry(CUST_ID, "namespace1", key.getKeyMetadata(), null));
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testActiveKeysCacheWithMultipleCustodiansInGenericCache() throws Exception {
+ ManagedKeyData key1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null));
+ String alias2 = "cust2";
+ byte[] cust_id2 = alias2.getBytes();
+ ManagedKeyData key2 = testProvider.getManagedKey(cust_id2, KEY_SPACE_GLOBAL);
+ assertNotNull(cache.getEntry(cust_id2, KEY_SPACE_GLOBAL, key2.getKeyMetadata(), null));
+ assertNotNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ // ACTIVE keys are automatically added to activeKeysCache when loaded.
+ assertEquals(2, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testActiveKeysCacheWithMultipleNamespaces() throws Exception {
+ ManagedKeyData key1 = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key1);
+ assertEquals(key1, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ ManagedKeyData key2 = cache.getActiveEntry(CUST_ID, "namespace1");
+ assertNotNull(key2);
+ assertEquals(key2, cache.getActiveEntry(CUST_ID, "namespace1"));
+ ManagedKeyData key3 = cache.getActiveEntry(CUST_ID, "namespace2");
+ assertNotNull(key3);
+ assertEquals(key3, cache.getActiveEntry(CUST_ID, "namespace2"));
+ verify(testProvider, times(3)).getManagedKey(any(), any(String.class));
+ assertEquals(3, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testEjectKey_ActiveKeysCacheOnly() throws Exception {
+ // Load a key into the active keys cache
+ ManagedKeyData key = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key);
+ assertEquals(1, cache.getActiveCacheEntryCount());
+
+ // Eject the key - should remove from active keys cache
+ boolean ejected = cache.ejectKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertTrue("Key should be ejected when metadata matches", ejected);
+ assertEquals(0, cache.getActiveCacheEntryCount());
+
+ // Try to eject again - should return false since it's already gone from active keys cache
+ boolean ejectedAgain = cache.ejectKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertFalse("Should return false when key is already ejected", ejectedAgain);
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testEjectKey_GenericCacheOnly() throws Exception {
+ // Load a key into the generic cache
+ ManagedKeyData key = cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL,
+ testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL).getKeyMetadata(), null);
+ assertNotNull(key);
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Eject the key - should remove from generic cache
+ boolean ejected = cache.ejectKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertTrue("Key should be ejected when metadata matches", ejected);
+ assertEquals(0, cache.getGenericCacheEntryCount());
+
+ // Try to eject again - should return false since it's already gone from generic cache
+ boolean ejectedAgain = cache.ejectKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertFalse("Should return false when key is already ejected", ejectedAgain);
+ assertEquals(0, cache.getGenericCacheEntryCount());
+ }
+
+ @Test
+ public void testEjectKey_Success() throws Exception {
+ // Load a key into the active keys cache
+ ManagedKeyData key = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key);
+ String metadata = key.getKeyMetadata();
+ assertEquals(1, cache.getActiveCacheEntryCount());
+
+ // Also load into the generic cache
+ ManagedKeyData keyFromGeneric = cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, metadata, null);
+ assertNotNull(keyFromGeneric);
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Eject the key with matching metadata - should remove from both caches
+ boolean ejected = cache.ejectKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertTrue("Key should be ejected when metadata matches", ejected);
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ assertEquals(0, cache.getGenericCacheEntryCount());
+
+ // Try to eject again - should return false since it's already gone from active keys cache
+ boolean ejectedAgain = cache.ejectKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertFalse("Should return false when key is already ejected", ejectedAgain);
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ assertEquals(0, cache.getGenericCacheEntryCount());
+ }
+
+ @Test
+ public void testEjectKey_MetadataMismatch() throws Exception {
+ // Load a key into both caches
+ ManagedKeyData key = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key);
+ assertEquals(1, cache.getActiveCacheEntryCount());
+
+ // Also load into the generic cache
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Try to eject with wrong metadata - should not eject from either cache
+ String wrongMetadata = "wrong-metadata";
+ boolean ejected = cache.ejectKey(CUST_ID, KEY_SPACE_GLOBAL,
+ ManagedKeyData.constructMetadataHash(wrongMetadata));
+ assertFalse("Key should not be ejected when metadata doesn't match", ejected);
+ assertEquals(1, cache.getActiveCacheEntryCount());
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Verify the key is still in both caches
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertEquals(key.getKeyMetadata(),
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash()).getKeyMetadata());
+ }
+
+ @Test
+ public void testEjectKey_KeyNotPresent() throws Exception {
+ // Try to eject a key that doesn't exist in the cache
+ String nonExistentMetadata = "non-existent-metadata";
+ boolean ejected = cache.ejectKey(CUST_ID, "non-existent-namespace",
+ ManagedKeyData.constructMetadataHash(nonExistentMetadata));
+ assertFalse("Should return false when key is not present", ejected);
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testEjectKey_MultipleKeys() throws Exception {
+ // Load multiple keys into both caches
+ ManagedKeyData key1 = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ ManagedKeyData key2 = cache.getActiveEntry(CUST_ID, "namespace1");
+ ManagedKeyData key3 = cache.getActiveEntry(CUST_ID, "namespace2");
+ assertNotNull(key1);
+ assertNotNull(key2);
+ assertNotNull(key3);
+ assertEquals(3, cache.getActiveCacheEntryCount());
+
+ // Also load all keys into the generic cache
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null);
+ cache.getEntry(CUST_ID, "namespace1", key2.getKeyMetadata(), null);
+ cache.getEntry(CUST_ID, "namespace2", key3.getKeyMetadata(), null);
+ assertEquals(3, cache.getGenericCacheEntryCount());
+
+ // Eject only the middle key from both caches
+ boolean ejected = cache.ejectKey(CUST_ID, "namespace1", key2.getKeyMetadataHash());
+ assertTrue("Key should be ejected from both caches", ejected);
+ assertEquals(2, cache.getActiveCacheEntryCount());
+ assertEquals(2, cache.getGenericCacheEntryCount());
+
+ // Verify only key2 was ejected - key1 and key3 should still be there
+ clearInvocations(testProvider);
+ assertEquals(key1, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertEquals(key3, cache.getActiveEntry(CUST_ID, "namespace2"));
+ // These getActiveEntry() calls should not trigger provider calls since keys are still cached
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+
+ // Verify generic cache still has key1 and key3
+ assertEquals(key1.getKeyMetadata(),
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null).getKeyMetadata());
+ assertEquals(key3.getKeyMetadata(),
+ cache.getEntry(CUST_ID, "namespace2", key3.getKeyMetadata(), null).getKeyMetadata());
+
+ // Try to eject key2 again - should return false since it's already gone from both caches
+ boolean ejectedAgain = cache.ejectKey(CUST_ID, "namespace1", key2.getKeyMetadataHash());
+ assertFalse("Should return false when key is already ejected", ejectedAgain);
+ assertEquals(2, cache.getActiveCacheEntryCount());
+ assertEquals(2, cache.getGenericCacheEntryCount());
+ }
+
+ @Test
+ public void testEjectKey_DifferentCustodian() throws Exception {
+ // Load a key for one custodian into both caches
+ ManagedKeyData key = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key);
+ String metadata = key.getKeyMetadata();
+ assertEquals(1, cache.getActiveCacheEntryCount());
+
+ // Also load into the generic cache
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Try to eject with a different custodian - should not eject from either cache
+ byte[] differentCustodian = "different-cust".getBytes();
+ boolean ejected =
+ cache.ejectKey(differentCustodian, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertFalse("Should not eject key for different custodian", ejected);
+ assertEquals(1, cache.getActiveCacheEntryCount());
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Verify the original key is still in both caches
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertEquals(metadata,
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, metadata, null).getKeyMetadata());
+ }
+
+ @Test
+ public void testEjectKey_AfterClearCache() throws Exception {
+ // Load a key into both caches
+ ManagedKeyData key = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key);
+ String metadata = key.getKeyMetadata();
+ assertEquals(1, cache.getActiveCacheEntryCount());
+
+ // Also load into the generic cache
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, metadata, null);
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Clear both caches
+ cache.clearCache();
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ assertEquals(0, cache.getGenericCacheEntryCount());
+
+ // Try to eject the key after both caches are cleared
+ boolean ejected = cache.ejectKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash());
+ assertFalse("Should return false when both caches are empty", ejected);
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ assertEquals(0, cache.getGenericCacheEntryCount());
+ }
+
+ @Test
+ public void testGetEntry_HashCollisionOrMismatchDetection() throws Exception {
+ // Create a key and get it into the cache
+ ManagedKeyData key1 = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key1);
+
+ // Now simulate a hash collision by trying to get an entry with the same hash
+ // but different custodian/namespace
+ byte[] differentCust = "different-cust".getBytes();
+ String differentNamespace = "different-namespace";
+
+ // This should return null due to custodian/namespace mismatch (collision detection)
+ ManagedKeyData result =
+ cache.getEntry(differentCust, differentNamespace, key1.getKeyMetadata(), null);
+
+ // Result should be null because of hash collision detection
+ // The cache finds an entry with the same metadata hash, but custodian/namespace don't match
+ assertNull("Should return null when hash collision is detected", result);
+ }
+
+ @Test
+ public void testEjectKey_HashCollisionOrMismatchProtection() throws Exception {
+ // Create two keys with potential hash collision scenario
+ byte[] cust1 = "cust1".getBytes();
+ byte[] cust2 = "cust2".getBytes();
+ String namespace1 = "namespace1";
+
+ // Load a key for cust1
+ ManagedKeyData key1 = cache.getActiveEntry(cust1, namespace1);
+ assertNotNull(key1);
+ assertEquals(1, cache.getActiveCacheEntryCount());
+
+ // Try to eject using same metadata hash but different custodian
+ // This should not eject the key due to custodian mismatch protection
+ boolean ejected = cache.ejectKey(cust2, namespace1, key1.getKeyMetadataHash());
+ assertFalse("Should not eject key with different custodian even if hash matches", ejected);
+ assertEquals(1, cache.getActiveCacheEntryCount());
+
+ // Verify the original key is still there
+ assertEquals(key1, cache.getActiveEntry(cust1, namespace1));
+ }
+
+ @Test
+ public void testEjectKey_HashCollisionInBothCaches() throws Exception {
+ // This test covers the scenario where rejectedValue is set during the first cache check
+ // (activeKeysCache) and then the second cache check (cacheByMetadataHash) takes the
+ // early return path because rejectedValue is already set.
+ byte[] cust1 = "cust1".getBytes();
+ byte[] cust2 = "cust2".getBytes();
+ String namespace1 = "namespace1";
+
+ // Load a key for cust1 - this will put it in BOTH activeKeysCache and cacheByMetadataHash
+ ManagedKeyData key1 = cache.getActiveEntry(cust1, namespace1);
+ assertNotNull(key1);
+
+ // Also access via generic cache to ensure it's in both caches
+ ManagedKeyData key1viaGeneric =
+ cache.getEntry(cust1, namespace1, key1.getKeyMetadata(), null);
+ assertNotNull(key1viaGeneric);
+ assertEquals(key1, key1viaGeneric);
+
+ // Verify both cache counts
+ assertEquals(1, cache.getActiveCacheEntryCount());
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Try to eject using same metadata hash but different custodian
+ // This will trigger the collision detection in BOTH caches:
+ // 1. First check in activeKeysCache will detect mismatch and set rejectedValue
+ // 2. Second check in cacheByMetadataHash should take early return (line 234)
+ boolean ejected = cache.ejectKey(cust2, namespace1, key1.getKeyMetadataHash());
+ assertFalse("Should not eject key with different custodian even if hash matches", ejected);
+
+ // Verify both caches still have the entry
+ assertEquals(1, cache.getActiveCacheEntryCount());
+ assertEquals(1, cache.getGenericCacheEntryCount());
+
+ // Verify the original key is still accessible
+ assertEquals(key1, cache.getActiveEntry(cust1, namespace1));
+ assertEquals(key1, cache.getEntry(cust1, namespace1, key1.getKeyMetadata(), null));
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestWithL2CacheAndNoDynamicLookup extends TestManagedKeyDataCache {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestWithL2CacheAndNoDynamicLookup.class);
+ private KeymetaTableAccessor mockL2 = mock(KeymetaTableAccessor.class);
+
+ @Before
+ public void setUp() {
+ super.setUp();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, false);
+ cache = new ManagedKeyDataCache(conf, mockL2);
+ }
+
+ @Test
+ public void testGenericCacheNonExistentKeyInL2Cache() throws Exception {
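+ // A miss consults L2 exactly once; the negative result is cached so the second lookup
+ // never reaches L2.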
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2).getKey(any(), any(String.class), any(byte[].class));
+ clearInvocations(mockL2);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2, never()).getKey(any(), any(String.class), any(byte[].class));
+ }
+
+ @Test
+ public void testGenericCacheRetrievalFromL2Cache() throws Exception {
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadataHash())).thenReturn(key);
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(mockL2).getKey(any(), any(String.class), any(byte[].class));
+ }
+
+ @Test
+ public void testActiveKeysCacheNonExistentKeyInL2Cache() throws Exception {
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getKeyManagementStateMarker(any(), any(String.class));
+ clearInvocations(mockL2);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2, never()).getKeyManagementStateMarker(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheRetrievalFromL2Cache() throws Exception {
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ when(mockL2.getKeyManagementStateMarker(CUST_ID, KEY_SPACE_GLOBAL)).thenReturn(key);
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getKeyManagementStateMarker(any(), any(String.class));
+ clearInvocations(mockL2);
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2, never()).getKeyManagementStateMarker(any(), any(String.class));
+ }
+
+ @Test
+ public void testGenericCacheWithKeymetaAccessorException() throws Exception {
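+ // An L2 read failure is treated as a miss and cached negatively, so the second lookup
+ // does not retry L2.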
+ when(mockL2.getKey(eq(CUST_ID), eq(KEY_SPACE_GLOBAL), any(byte[].class)))
+ .thenThrow(new IOException("Test exception"));
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2).getKey(any(), any(String.class), any(byte[].class));
+ clearInvocations(mockL2);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2, never()).getKey(any(), any(String.class), any(byte[].class));
+ }
+
+ @Test
+ public void testGetActiveEntryWithKeymetaAccessorException() throws Exception {
+ when(mockL2.getKeyManagementStateMarker(CUST_ID, KEY_SPACE_GLOBAL))
+ .thenThrow(new IOException("Test exception"));
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getKeyManagementStateMarker(any(), any(String.class));
+ clearInvocations(mockL2);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2, never()).getKeyManagementStateMarker(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheUsesKeymetaAccessorWhenGenericCacheEmpty() throws Exception {
+ // Ensure generic cache is empty
+ cache.clearCache();
+
+ // Mock the keymetaAccessor to return a key
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ when(mockL2.getKeyManagementStateMarker(CUST_ID, KEY_SPACE_GLOBAL)).thenReturn(key);
+
+ // Get the active entry - it should call keymetaAccessor since generic cache is empty
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getKeyManagementStateMarker(any(), any(String.class));
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestWithL2CacheAndDynamicLookup extends TestManagedKeyDataCache {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestWithL2CacheAndDynamicLookup.class);
+ private KeymetaTableAccessor mockL2 = mock(KeymetaTableAccessor.class);
+
+ @Before
+ public void setUp() {
+ super.setUp();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
+ cache = new ManagedKeyDataCache(conf, mockL2);
+ }
+
+ @Test
+ public void testGenericCacheRetrievalFromProviderWhenKeyNotFoundInL2Cache() throws Exception {
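+ // With dynamic lookup enabled, an L2 miss falls through to the provider and the
+ // resolved key is written back to L2 via addKey().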
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ doReturn(key).when(testProvider).unwrapKey(any(String.class), any());
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(mockL2).getKey(any(), any(String.class), any(byte[].class));
+ verify(mockL2).addKey(any(ManagedKeyData.class));
+ }
+
+ @Test
+ public void testAddKeyFailure() throws Exception {
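+ // A failure to persist the resolved key back to L2 must not fail the lookup itself.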
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ doReturn(key).when(testProvider).unwrapKey(any(String.class), any());
+ doThrow(new IOException("Test exception")).when(mockL2).addKey(any(ManagedKeyData.class));
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(mockL2).addKey(any(ManagedKeyData.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheDynamicLookupWithUnexpectedException() throws Exception {
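+ // Even an unexpected RuntimeException from the provider is absorbed and cached as a
+ // negative entry.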
+ doThrow(new RuntimeException("Test exception")).when(testProvider).getManagedKey(any(),
+ any(String.class));
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ // A second invocation should not result in a call to the provider.
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheRetrievalFromProviderWhenKeyNotFoundInL2Cache() throws Exception {
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ doReturn(key).when(testProvider).getManagedKey(any(), any(String.class));
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getKeyManagementStateMarker(any(), any(String.class));
+ }
+
+ @Test
+ public void testGenericCacheUsesActiveKeysCacheFirst() throws Exception {
+ // First populate the active keys cache with an active key
+ ManagedKeyData key1 = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+
+ // Now get the generic cache entry - it should use the active keys cache first, not call
+ // keymetaAccessor
+ assertEquals(key1, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+
+ // Look up a different key.
+ ManagedKeyData key2 = cache.getActiveEntry(CUST_ID, "namespace1");
+ assertNotEquals(key1, key2);
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+
+ // Now get the generic cache entry - it should use the active keys cache first, not call
+ // keymetaAccessor
+ assertEquals(key2, cache.getEntry(CUST_ID, "namespace1", key2.getKeyMetadata(), null));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testGetOlderEntryFromGenericCache() throws Exception {
+ // Get one version of the key into the ActiveKeysCache
+ ManagedKeyData key1 = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key1);
+ clearInvocations(testProvider);
+
+ // Now look up another version of the key; the cache should consult and discard the
+ // active key, then unwrap the requested version via the provider.
+ ManagedKeyData key2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(key2, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key2.getKeyMetadata(), null));
+ verify(testProvider).unwrapKey(any(String.class), any());
+ }
+
+ @Test
+ public void testThatActiveKeysCache_PopulatedByGenericCache() throws Exception {
+ // First populate the generic cache with an active key
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(testProvider).unwrapKey(any(String.class), any());
+
+ // Clear invocations to reset the mock state
+ clearInvocations(testProvider);
+
+ // Now get the active entry - it should already be there due to the generic cache first
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).unwrapKey(any(String.class), any());
+ }
+ }
+
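+ /**
+ * Asserts that each given key is retrievable via the generic cache and that the active
+ * cache holds one entry per distinct (custodian, namespace) pair among ACTIVE keys.
+ */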
+ protected void assertGenericCacheEntries(ManagedKeyData... keys) throws Exception {
+ for (ManagedKeyData key : keys) {
+ assertEquals(key,
+ cache.getEntry(key.getKeyCustodian(), key.getKeyNamespace(), key.getKeyMetadata(), null));
+ }
+ assertEquals(keys.length, cache.getGenericCacheEntryCount());
+ int activeKeysCount =
+ Arrays.stream(keys).filter(key -> key.getKeyState() == ManagedKeyState.ACTIVE)
+ .map(key -> new ManagedKeyDataCache.ActiveKeysCacheKey(key.getKeyCustodian(),
+ key.getKeyNamespace()))
+ .collect(Collectors.toSet()).size();
+ assertEquals(activeKeysCount, cache.getActiveCacheEntryCount());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
new file mode 100644
index 000000000000..d04dee3853e9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
@@ -0,0 +1,443 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.security.KeyException;
+import java.util.List;
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ManagedKeysProtos;
+
+/**
+ * Tests the admin API via both RPC and local calls.
+ */
+@Category({ MasterTests.class, MediumTests.class })
+public class TestManagedKeymeta extends ManagedKeyTestBase {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagedKeymeta.class);
+
+ /**
+ * Functional interface for setup operations that can throw ServiceException.
+ */
+ @FunctionalInterface
+ interface SetupFunction {
+ void setup(ManagedKeysProtos.ManagedKeysService.BlockingInterface mockStub,
+ ServiceException networkError) throws ServiceException;
+ }
+
+ /**
+ * Functional interface for test operations that can throw checked exceptions.
+ */
+ @FunctionalInterface
+ interface TestFunction {
+ void test(KeymetaAdminClient client) throws IOException, KeyException;
+ }
+
+ @Test
+ public void testEnableLocal() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin();
+ doTestEnable(keymetaAdmin);
+ }
+
+ @Test
+ public void testEnableOverRPC() throws Exception {
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ doTestEnable(adminClient);
+ }
+
+ private void doTestEnable(KeymetaAdmin adminClient) throws IOException, KeyException {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(master.getConfiguration());
+ managedKeyProvider.setMultikeyGenMode(true);
+ String cust = "cust1";
+ byte[] custBytes = cust.getBytes();
+ ManagedKeyData managedKey =
+ adminClient.enableKeyManagement(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyDataSingleKey(managedKey, ManagedKeyState.ACTIVE);
+
+ // Enable must have persisted the key, but it is not read back until we go through the cache.
+ // Multikey gen mode is enabled, but since the key should be loaded from L2, we should get
+ // the same key back even after ejecting it.
+ HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
+ ManagedKeyDataCache managedKeyDataCache = regionServer.getManagedKeyDataCache();
+ ManagedKeyData activeEntry =
+ managedKeyDataCache.getActiveEntry(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull(activeEntry);
+ assertTrue(Bytes.equals(managedKey.getKeyMetadataHash(), activeEntry.getKeyMetadataHash()));
+ assertTrue(managedKeyDataCache.ejectKey(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL,
+ managedKey.getKeyMetadataHash()));
+ activeEntry = managedKeyDataCache.getActiveEntry(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull(activeEntry);
+ assertTrue(Bytes.equals(managedKey.getKeyMetadataHash(), activeEntry.getKeyMetadataHash()));
+
+ List<ManagedKeyData> managedKeys =
+ adminClient.getManagedKeys(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertEquals(managedKeyProvider.getLastGeneratedKeyData(cust, ManagedKeyData.KEY_SPACE_GLOBAL)
+ .createClientFacingInstance(), managedKeys.get(0).createClientFacingInstance());
+
+ String nonExistentCust = "nonExistentCust";
+ byte[] nonExistentBytes = nonExistentCust.getBytes();
+ managedKeyProvider.setMockedKeyState(nonExistentCust, ManagedKeyState.FAILED);
+ ManagedKeyData managedKey1 =
+ adminClient.enableKeyManagement(nonExistentBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyDataSingleKey(managedKey1, ManagedKeyState.FAILED);
+
+ String disabledCust = "disabledCust";
+ byte[] disabledBytes = disabledCust.getBytes();
+ managedKeyProvider.setMockedKeyState(disabledCust, ManagedKeyState.DISABLED);
+ ManagedKeyData managedKey2 =
+ adminClient.enableKeyManagement(disabledBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyDataSingleKey(managedKey2, ManagedKeyState.DISABLED);
+ }
+
+ private static void assertKeyDataSingleKey(ManagedKeyData managedKeyState,
+ ManagedKeyState keyState) {
+ assertNotNull(managedKeyState);
+ assertEquals(keyState, managedKeyState.getKeyState());
+ }
+
+ @Test
+ public void testEnableKeyManagementWithExceptionOnGetManagedKey() throws Exception {
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(TEST_UTIL.getConfiguration());
+ managedKeyProvider.setShouldThrowExceptionOnGetManagedKey(true);
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ IOException exception = assertThrows(IOException.class,
+ () -> adminClient.enableKeyManagement(new byte[0], "namespace"));
+ assertTrue(exception.getMessage().contains("key_cust must not be empty"));
+ }
+
+ @Test
+ public void testEnableKeyManagementWithClientSideServiceException() throws Exception {
+ doTestWithClientSideServiceException((mockStub,
+ networkError) -> when(mockStub.enableKeyManagement(any(), any())).thenThrow(networkError),
+ (client) -> client.enableKeyManagement(new byte[0], "namespace"));
+ }
+
+ @Test
+ public void testGetManagedKeysWithClientSideServiceException() throws Exception {
+ // Similar test for getManagedKeys method
+ doTestWithClientSideServiceException((mockStub,
+ networkError) -> when(mockStub.getManagedKeys(any(), any())).thenThrow(networkError),
+ (client) -> client.getManagedKeys(new byte[0], "namespace"));
+ }
+
+ @Test
+ public void testRotateSTKLocal() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin();
+ doTestRotateSTK(keymetaAdmin);
+ }
+
+ @Test
+ public void testRotateSTKOverRPC() throws Exception {
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ doTestRotateSTK(adminClient);
+ }
+
+ private void doTestRotateSTK(KeymetaAdmin adminClient) throws IOException {
+ // Call rotateSTK - since no actual system key change has occurred,
+ // this should return false (no rotation performed)
+ boolean result = adminClient.rotateSTK();
+ assertFalse("rotateSTK should return false when no key change is detected", result);
+
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ ManagedKeyData currentSystemKey = master.getSystemKeyCache().getLatestSystemKey();
+
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(TEST_UTIL.getConfiguration());
+ // Once we enable multikeyGenMode on MockManagedKeyProvider, every call should return a new key
+ // which should trigger a rotation.
+ managedKeyProvider.setMultikeyGenMode(true);
+ result = adminClient.rotateSTK();
+ assertTrue("rotateSTK should return true when a new key is detected", result);
+
+ ManagedKeyData newSystemKey = master.getSystemKeyCache().getLatestSystemKey();
+ assertNotEquals("newSystemKey should be different from currentSystemKey", currentSystemKey,
+ newSystemKey);
+
+ HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
+ assertEquals("regionServer should have the same new system key", newSystemKey,
+ regionServer.getSystemKeyCache().getLatestSystemKey());
+ }
+
+ @Test
+ public void testRotateSTKWithExceptionOnGetSystemKey() throws Exception {
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(TEST_UTIL.getConfiguration());
+ managedKeyProvider.setShouldThrowExceptionOnGetSystemKey(true);
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ IOException exception = assertThrows(IOException.class, () -> adminClient.rotateSTK());
+ assertTrue(exception.getMessage().contains("Test exception on getSystemKey"));
+ }
+
+ @Test
+ public void testRotateSTKWithClientSideServiceException() throws Exception {
+ doTestWithClientSideServiceException(
+ (mockStub, networkError) -> when(mockStub.rotateSTK(any(), any())).thenThrow(networkError),
+ (client) -> client.rotateSTK());
+ }
+
+ private void doTestWithClientSideServiceException(SetupFunction setupFunction,
+ TestFunction testFunction) throws Exception {
+ ManagedKeysProtos.ManagedKeysService.BlockingInterface mockStub =
+ mock(ManagedKeysProtos.ManagedKeysService.BlockingInterface.class);
+
+ ServiceException networkError = new ServiceException("Network error");
+ networkError.initCause(new IOException("Network error"));
+
+ KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ // Use reflection to set the stub
+ Field stubField = KeymetaAdminClient.class.getDeclaredField("stub");
+ stubField.setAccessible(true);
+ stubField.set(client, mockStub);
+
+ // Setup the mock
+ setupFunction.setup(mockStub, networkError);
+
+ // Execute test function and expect IOException
+ IOException exception = assertThrows(IOException.class, () -> testFunction.test(client));
+
+ assertTrue(exception.getMessage().contains("Network error"));
+ }
+
+ @Test
+ public void testDisableKeyManagementLocal() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin();
+ doTestDisableKeyManagement(keymetaAdmin);
+ }
+
+ @Test
+ public void testDisableKeyManagementOverRPC() throws Exception {
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ doTestDisableKeyManagement(adminClient);
+ }
+
+ private void doTestDisableKeyManagement(KeymetaAdmin adminClient)
+ throws IOException, KeyException {
+ String cust = "cust2";
+ byte[] custBytes = cust.getBytes();
+
+ // First enable key management
+ ManagedKeyData managedKey =
+ adminClient.enableKeyManagement(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull(managedKey);
+ assertKeyDataSingleKey(managedKey, ManagedKeyState.ACTIVE);
+
+ // Now disable it
+ ManagedKeyData disabledKey =
+ adminClient.disableKeyManagement(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull(disabledKey);
+ assertEquals(ManagedKeyState.DISABLED, disabledKey.getKeyState().getExternalState());
+ }
+
+ @Test
+ public void testDisableKeyManagementWithClientSideServiceException() throws Exception {
+ doTestWithClientSideServiceException(
+ (mockStub, networkError) -> when(mockStub.disableKeyManagement(any(), any()))
+ .thenThrow(networkError),
+ (client) -> client.disableKeyManagement(new byte[0], "namespace"));
+ }
+
+ @Test
+ public void testDisableManagedKeyLocal() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin();
+ doTestDisableManagedKey(keymetaAdmin);
+ }
+
+ @Test
+ public void testDisableManagedKeyOverRPC() throws Exception {
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ doTestDisableManagedKey(adminClient);
+ }
+
+ private void doTestDisableManagedKey(KeymetaAdmin adminClient) throws IOException, KeyException {
+ String cust = "cust3";
+ byte[] custBytes = cust.getBytes();
+
+ // First enable key management to create a key
+ ManagedKeyData managedKey =
+ adminClient.enableKeyManagement(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull(managedKey);
+ assertKeyDataSingleKey(managedKey, ManagedKeyState.ACTIVE);
+ byte[] keyMetadataHash = managedKey.getKeyMetadataHash();
+
+ // Now disable the specific key
+ ManagedKeyData disabledKey =
+ adminClient.disableManagedKey(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL, keyMetadataHash);
+ assertNotNull(disabledKey);
+ assertEquals(ManagedKeyState.DISABLED, disabledKey.getKeyState().getExternalState());
+ }
+
+ @Test
+ public void testDisableManagedKeyWithClientSideServiceException() throws Exception {
+ doTestWithClientSideServiceException(
+ (mockStub, networkError) -> when(mockStub.disableManagedKey(any(), any()))
+ .thenThrow(networkError),
+ (client) -> client.disableManagedKey(new byte[0], "namespace", new byte[0]));
+ }
+
+ @Test
+ public void testRotateManagedKeyWithClientSideServiceException() throws Exception {
+ doTestWithClientSideServiceException((mockStub,
+ networkError) -> when(mockStub.rotateManagedKey(any(), any())).thenThrow(networkError),
+ (client) -> client.rotateManagedKey(new byte[0], "namespace"));
+ }
+
+ @Test
+ public void testRefreshManagedKeysWithClientSideServiceException() throws Exception {
+ doTestWithClientSideServiceException((mockStub,
+ networkError) -> when(mockStub.refreshManagedKeys(any(), any())).thenThrow(networkError),
+ (client) -> client.refreshManagedKeys(new byte[0], "namespace"));
+ }
+
+ @Test
+ public void testRotateManagedKeyLocal() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin();
+ doTestRotateManagedKey(keymetaAdmin);
+ }
+
+ @Test
+ public void testRotateManagedKeyOverRPC() throws Exception {
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ doTestRotateManagedKey(adminClient);
+ }
+
+ private void doTestRotateManagedKey(KeymetaAdmin adminClient) throws IOException, KeyException {
+ // This test covers the success path (line 133 in KeymetaAdminClient for RPC)
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(master.getConfiguration());
+ managedKeyProvider.setMultikeyGenMode(true);
+
+ String cust = "cust1";
+ byte[] custBytes = cust.getBytes();
+
+ // Enable key management first to have a key to rotate
+ adminClient.enableKeyManagement(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+
+ // Now rotate the key
+ ManagedKeyData rotatedKey =
+ adminClient.rotateManagedKey(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+
+ assertNotNull("Rotated key should not be null", rotatedKey);
+ assertEquals("Rotated key should be ACTIVE", ManagedKeyState.ACTIVE, rotatedKey.getKeyState());
+ assertEquals("Rotated key should have correct custodian", 0,
+ Bytes.compareTo(custBytes, rotatedKey.getKeyCustodian()));
+ assertEquals("Rotated key should have correct namespace", ManagedKeyData.KEY_SPACE_GLOBAL,
+ rotatedKey.getKeyNamespace());
+ }
+
+ @Test
+ public void testRefreshManagedKeysLocal() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin();
+ doTestRefreshManagedKeys(keymetaAdmin);
+ }
+
+ @Test
+ public void testRefreshManagedKeysOverRPC() throws Exception {
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ doTestRefreshManagedKeys(adminClient);
+ }
+
+ private void doTestRefreshManagedKeys(KeymetaAdmin adminClient) throws IOException, KeyException {
+ // This test covers the success path (line 148 in KeymetaAdminClient for RPC)
+ String cust = "cust1";
+ byte[] custBytes = cust.getBytes();
+
+ // Enable key management first to have keys to refresh
+ adminClient.enableKeyManagement(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+
+ // Should complete without exception - covers the normal return path
+ adminClient.refreshManagedKeys(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+
+ // Verify keys still exist after refresh
+ List<ManagedKeyData> keys =
+ adminClient.getManagedKeys(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull("Keys should exist after refresh", keys);
+ assertFalse("Should have at least one key after refresh", keys.isEmpty());
+ }
+
+ // ========== NotImplementedException Tests ==========
+
+ @Test
+ public void testEjectManagedKeyDataCacheEntryNotSupported() throws Exception {
+ // This test covers lines 89-90 in KeymetaAdminClient
+ KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ String cust = "cust1";
+ byte[] custBytes = cust.getBytes();
+
+ NotImplementedException exception = assertThrows(NotImplementedException.class, () -> client
+ .ejectManagedKeyDataCacheEntry(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL, "metadata"));
+
+ assertTrue("Exception message should indicate method is not supported",
+ exception.getMessage().contains("ejectManagedKeyDataCacheEntry not supported"));
+ assertTrue("Exception message should mention KeymetaAdminClient",
+ exception.getMessage().contains("KeymetaAdminClient"));
+ }
+
+ @Test
+ public void testClearManagedKeyDataCacheNotSupported() throws Exception {
+ // This test covers lines 95-96 in KeymetaAdminClient
+ KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection());
+
+ NotImplementedException exception =
+ assertThrows(NotImplementedException.class, () -> client.clearManagedKeyDataCache());
+
+ assertTrue("Exception message should indicate method is not supported",
+ exception.getMessage().contains("clearManagedKeyDataCache not supported"));
+ assertTrue("Exception message should mention KeymetaAdminClient",
+ exception.getMessage().contains("KeymetaAdminClient"));
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
new file mode 100644
index 000000000000..f541d4bac18c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
@@ -0,0 +1,310 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Key;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import javax.crypto.spec.SecretKeySpec;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+/**
+ * Tests for SystemKeyCache class. NOTE: The createCache() method is tested in
+ * TestKeyManagementService.
+ */
+@Category({ MasterTests.class, SmallTests.class })
+public class TestSystemKeyCache {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSystemKeyCache.class);
+
+ @Mock
+ private SystemKeyAccessor mockAccessor;
+
+ private static final byte[] TEST_CUSTODIAN = "test-custodian".getBytes();
+ private static final String TEST_NAMESPACE = "test-namespace";
+ private static final String TEST_METADATA_1 = "metadata-1";
+ private static final String TEST_METADATA_2 = "metadata-2";
+ private static final String TEST_METADATA_3 = "metadata-3";
+
+ private Key testKey1;
+ private Key testKey2;
+ private Key testKey3;
+ private ManagedKeyData keyData1;
+ private ManagedKeyData keyData2;
+ private ManagedKeyData keyData3;
+ private Path keyPath1;
+ private Path keyPath2;
+ private Path keyPath3;
+
+ @Before
+ public void setUp() {
+ MockitoAnnotations.openMocks(this);
+
+ // Create test keys
+ testKey1 = new SecretKeySpec("test-key-1-bytes".getBytes(), "AES");
+ testKey2 = new SecretKeySpec("test-key-2-bytes".getBytes(), "AES");
+ testKey3 = new SecretKeySpec("test-key-3-bytes".getBytes(), "AES");
+
+ // Create test key data with different checksums
+ keyData1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey1, ManagedKeyState.ACTIVE,
+ TEST_METADATA_1, 1000L);
+ keyData2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey2, ManagedKeyState.ACTIVE,
+ TEST_METADATA_2, 2000L);
+ keyData3 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey3, ManagedKeyState.ACTIVE,
+ TEST_METADATA_3, 3000L);
+
+ // Create test paths
+ keyPath1 = new Path("/system/keys/key1");
+ keyPath2 = new Path("/system/keys/key2");
+ keyPath3 = new Path("/system/keys/key3");
+ }
+
+ @Test
+ public void testCreateCacheWithSingleSystemKey() throws Exception {
+ // Setup
+ List<Path> keyPaths = Collections.singletonList(keyPath1);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNotNull(cache);
+ assertSame(keyData1, cache.getLatestSystemKey());
+ assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum()));
+ assertNull(cache.getSystemKeyByChecksum(999L)); // Non-existent checksum
+
+ verify(mockAccessor).getAllSystemKeyFiles();
+ verify(mockAccessor).loadSystemKey(keyPath1);
+ }
+
+ @Test
+ public void testCreateCacheWithMultipleSystemKeys() throws Exception {
+ // Setup - keys should be processed in order, first one becomes latest
+ List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2, keyPath3);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+ when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2);
+ when(mockAccessor.loadSystemKey(keyPath3)).thenReturn(keyData3);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNotNull(cache);
+ assertSame(keyData1, cache.getLatestSystemKey()); // First key becomes latest
+
+ // All keys should be accessible by checksum
+ assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum()));
+ assertSame(keyData2, cache.getSystemKeyByChecksum(keyData2.getKeyChecksum()));
+ assertSame(keyData3, cache.getSystemKeyByChecksum(keyData3.getKeyChecksum()));
+
+ // Non-existent checksum should return null
+ assertNull(cache.getSystemKeyByChecksum(999L));
+
+ verify(mockAccessor).getAllSystemKeyFiles();
+ verify(mockAccessor).loadSystemKey(keyPath1);
+ verify(mockAccessor).loadSystemKey(keyPath2);
+ verify(mockAccessor).loadSystemKey(keyPath3);
+ }
+
+ @Test
+ public void testCreateCacheWithNoSystemKeyFiles() throws Exception {
+ // Setup - this covers the uncovered lines 46-47
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(Collections.emptyList());
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNull(cache);
+ verify(mockAccessor).getAllSystemKeyFiles();
+ }
+
+ @Test
+ public void testCreateCacheWithEmptyKeyFilesList() throws Exception {
+ // Setup - alternative empty scenario
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(new ArrayList<>());
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNull(cache);
+ verify(mockAccessor).getAllSystemKeyFiles();
+ }
+
+ @Test
+ public void testGetLatestSystemKeyConsistency() throws Exception {
+ // Setup
+ List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+ when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify - latest key should be consistent across calls
+ ManagedKeyData latest1 = cache.getLatestSystemKey();
+ ManagedKeyData latest2 = cache.getLatestSystemKey();
+
+ assertNotNull(latest1);
+ assertSame(latest1, latest2);
+ assertSame(keyData1, latest1); // First key should be latest
+ }
+
+ @Test
+ public void testGetSystemKeyByChecksumWithDifferentKeys() throws Exception {
+ // Setup
+ List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2, keyPath3);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+ when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2);
+ when(mockAccessor.loadSystemKey(keyPath3)).thenReturn(keyData3);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify each key can be retrieved by its unique checksum
+ long checksum1 = keyData1.getKeyChecksum();
+ long checksum2 = keyData2.getKeyChecksum();
+ long checksum3 = keyData3.getKeyChecksum();
+
+ // Checksums should be different
+    assertTrue(checksum1 != checksum2);
+    assertTrue(checksum2 != checksum3);
+    assertTrue(checksum1 != checksum3);
+
+ // Each key should be retrievable by its checksum
+ assertSame(keyData1, cache.getSystemKeyByChecksum(checksum1));
+ assertSame(keyData2, cache.getSystemKeyByChecksum(checksum2));
+ assertSame(keyData3, cache.getSystemKeyByChecksum(checksum3));
+ }
+
+ @Test
+ public void testGetSystemKeyByChecksumWithNonExistentChecksum() throws Exception {
+ // Setup
+ List keyPaths = Collections.singletonList(keyPath1);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNotNull(cache);
+
+ // Test various non-existent checksums
+ assertNull(cache.getSystemKeyByChecksum(0L));
+ assertNull(cache.getSystemKeyByChecksum(-1L));
+ assertNull(cache.getSystemKeyByChecksum(Long.MAX_VALUE));
+ assertNull(cache.getSystemKeyByChecksum(Long.MIN_VALUE));
+
+ // But the actual checksum should work
+ assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum()));
+ }
+
+ @Test(expected = IOException.class)
+ public void testCreateCacheWithAccessorIOException() throws Exception {
+ // Setup - accessor throws IOException
+ when(mockAccessor.getAllSystemKeyFiles()).thenThrow(new IOException("File system error"));
+
+ // Execute - should propagate the IOException
+ SystemKeyCache.createCache(mockAccessor);
+ }
+
+ @Test(expected = IOException.class)
+ public void testCreateCacheWithLoadSystemKeyIOException() throws Exception {
+ // Setup - loading key throws IOException
+ List keyPaths = Collections.singletonList(keyPath1);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenThrow(new IOException("Key load error"));
+
+ // Execute - should propagate the IOException
+ SystemKeyCache.createCache(mockAccessor);
+ }
+
+ @Test
+ public void testCacheWithKeysHavingSameChecksum() throws Exception {
+ // Setup - create two keys that will have the same checksum (same content)
+ Key sameKey1 = new SecretKeySpec("identical-bytes".getBytes(), "AES");
+ Key sameKey2 = new SecretKeySpec("identical-bytes".getBytes(), "AES");
+
+ ManagedKeyData sameManagedKey1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, sameKey1,
+ ManagedKeyState.ACTIVE, "metadata-A", 1000L);
+ ManagedKeyData sameManagedKey2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, sameKey2,
+ ManagedKeyState.ACTIVE, "metadata-B", 2000L);
+
+ // Verify they have the same checksum
+ assertEquals(sameManagedKey1.getKeyChecksum(), sameManagedKey2.getKeyChecksum());
+
+ List keyPaths = Arrays.asList(keyPath1, keyPath2);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(sameManagedKey1);
+ when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(sameManagedKey2);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify - second key should overwrite first in the map due to same checksum
+ assertNotNull(cache);
+ assertSame(sameManagedKey1, cache.getLatestSystemKey()); // First is still latest
+
+ // The map should contain the second key for the shared checksum
+ ManagedKeyData retrievedKey = cache.getSystemKeyByChecksum(sameManagedKey1.getKeyChecksum());
+ assertSame(sameManagedKey2, retrievedKey); // Last one wins in TreeMap
+ }
+
+ @Test
+  public void testCreateCacheWithLoadSystemKeyRuntimeException() throws Exception {
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(Arrays.asList(keyPath1));
+ when(mockAccessor.loadSystemKey(keyPath1)).thenThrow(new RuntimeException("Key load error"));
+
+ RuntimeException ex = assertThrows(RuntimeException.class, () -> {
+ SystemKeyCache.createCache(mockAccessor);
+ });
+    assertEquals("Key load error", ex.getMessage());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java
index f5c259927475..9cf69775a30e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java
@@ -26,7 +26,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseZKTestingUtil;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -72,7 +71,7 @@ public static void setUpBeforeClass() throws Exception {
CHORE_SERVICE = new ChoreService("TestMasterStateStore");
HFILE_CLEANER_POOL = DirScanPool.getHFileCleanerScanPool(conf);
LOG_CLEANER_POOL = DirScanPool.getLogCleanerScanPool(conf);
- Server server = mock(Server.class);
+ MasterServices server = mock(MasterServices.class);
when(server.getConfiguration()).thenReturn(conf);
when(server.getServerName())
.thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index daaa2e5c2b99..b04380ae450c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -38,6 +38,10 @@
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
@@ -116,6 +120,21 @@ public ChoreService getChoreService() {
return null;
}
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public CatalogJanitor getCatalogJanitor() {
return null;
@@ -136,6 +155,11 @@ public MasterWalManager getMasterWalManager() {
return null;
}
+ @Override
+ public boolean rotateSystemKeyIfChanged() {
+ return false;
+ }
+
@Override
public MasterCoprocessorHost getMasterCoprocessorHost() {
return null;
@@ -569,8 +593,12 @@ public long flushTable(TableName tableName, List columnFamilies, long no
return 0;
}
- @Override
public long rollAllWALWriters(long nonceGroup, long nonce) throws IOException {
return 0;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return this;
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index a25bae6ec7bd..b11ccefaadb5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -52,6 +52,10 @@
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
@@ -140,6 +144,9 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BooleanMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
@@ -556,6 +563,21 @@ public ChoreService getChoreService() {
return null;
}
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public void updateRegionFavoredNodesMapping(String encodedRegionName,
List favoredNodes) {
@@ -692,6 +714,24 @@ public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController contr
return null;
}
+ @Override
+ public EmptyMsg refreshSystemKeyCache(RpcController controller, EmptyMsg request)
+ throws ServiceException {
+ return null;
+ }
+
+ @Override
+ public BooleanMsg ejectManagedKeyDataCacheEntry(RpcController controller,
+ ManagedKeyEntryRequest request) throws ServiceException {
+ return null;
+ }
+
+ @Override
+ public EmptyMsg clearManagedKeyDataCache(RpcController controller, EmptyMsg request)
+ throws ServiceException {
+ return null;
+ }
+
@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
@@ -757,4 +797,9 @@ public ReplicateWALEntryResponse replicateToReplica(RpcController controller,
ReplicateWALEntryRequest request) throws ServiceException {
return null;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index fcb67ed31b47..ac6d754a8396 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskGroup;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -327,5 +328,10 @@ public ClusterStatusTracker getClusterStatusTracker() {
public ActiveMasterManager getActiveMasterManager() {
return activeMasterManager;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
new file mode 100644
index 000000000000..9c3e5991c6e7
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
@@ -0,0 +1,1077 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.Mockito.clearInvocations;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Consumer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.AsyncAdmin;
+import org.apache.hadoop.hbase.client.AsyncClusterConnection;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdminImpl;
+import org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runners.Suite;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestKeymetaAdminImpl.TestWhenDisabled.class,
+ TestKeymetaAdminImpl.TestAdminImpl.class, TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class,
+ TestKeymetaAdminImpl.TestMiscAPIs.class,
+ TestKeymetaAdminImpl.TestNewKeyManagementAdminMethods.class })
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaAdminImpl {
+
+ private static final String CUST = "cust1";
+ private static final byte[] CUST_BYTES = CUST.getBytes();
+
+ protected final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+ @Rule
+ public TestName name = new TestName();
+
+ protected Configuration conf;
+ protected Path testRootDir;
+ protected FileSystem fs;
+
+ protected FileSystem mockFileSystem = mock(FileSystem.class);
+ protected MasterServices mockServer = mock(MasterServices.class);
+ protected KeymetaAdminImplForTest keymetaAdmin;
+ KeymetaTableAccessor keymetaAccessor = mock(KeymetaTableAccessor.class);
+
+ @Before
+ public void setUp() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ testRootDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+ fs = testRootDir.getFileSystem(conf);
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
+
+ when(mockServer.getKeyManagementService()).thenReturn(mockServer);
+ when(mockServer.getFileSystem()).thenReturn(mockFileSystem);
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ keymetaAdmin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ // Clear the provider cache to avoid test interference
+ Encryption.clearKeyProviderCache();
+ }
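+
+  // Configuration sketch: the two settings toggled by these tests, distilled from setUp()
+  // above. Values are illustrative; MockManagedKeyProvider is the test double used throughout.
+  private static Configuration newManagedKeysEnabledConf() {
+    Configuration c = HBaseConfiguration.create();
+    c.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+    c.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+    return c;
+  }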
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestWhenDisabled extends TestKeymetaAdminImpl {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestWhenDisabled.class);
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
+ }
+
+ @Test
+ public void testDisabled() throws Exception {
+ assertThrows(IOException.class, () -> keymetaAdmin
+ .enableKeyManagement(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, KEY_SPACE_GLOBAL));
+ assertThrows(IOException.class, () -> keymetaAdmin
+ .getManagedKeys(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, KEY_SPACE_GLOBAL));
+ }
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAdminImpl extends TestKeymetaAdminImpl {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAdminImpl.class);
+
+ @Parameter(0)
+ public String keySpace;
+ @Parameter(1)
+ public ManagedKeyState keyState;
+ @Parameter(2)
+ public boolean isNullKey;
+
+ @Parameters(name = "{index},keySpace={0},keyState={1}")
+ public static Collection data() {
+ return Arrays
+ .asList(new Object[][] { { KEY_SPACE_GLOBAL, ACTIVE, false }, { "ns1", ACTIVE, false },
+ { KEY_SPACE_GLOBAL, FAILED, true }, { KEY_SPACE_GLOBAL, DISABLED, true }, });
+ }
+
+ @Test
+ public void testEnableAndGet() throws Exception {
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(conf);
+ managedKeyProvider.setMockedKeyState(CUST, keyState);
+ when(keymetaAccessor.getKeyManagementStateMarker(CUST.getBytes(), keySpace))
+ .thenReturn(managedKeyProvider.getManagedKey(CUST.getBytes(), keySpace));
+
+ ManagedKeyData managedKey = keymetaAdmin.enableKeyManagement(CUST_BYTES, keySpace);
+ assertNotNull(managedKey);
+ assertEquals(keyState, managedKey.getKeyState());
+ verify(keymetaAccessor).getKeyManagementStateMarker(CUST.getBytes(), keySpace);
+
+ keymetaAdmin.getManagedKeys(CUST_BYTES, keySpace);
+ verify(keymetaAccessor).getAllKeys(CUST.getBytes(), keySpace, false);
+ }
+
+ @Test
+ public void testEnableKeyManagement() throws Exception {
+ assumeTrue(keyState == ACTIVE);
+ ManagedKeyData managedKey = keymetaAdmin.enableKeyManagement(CUST_BYTES, "namespace1");
+ assertEquals(ManagedKeyState.ACTIVE, managedKey.getKeyState());
+ assertEquals(ManagedKeyProvider.encodeToStr(CUST_BYTES), managedKey.getKeyCustodianEncoded());
+ assertEquals("namespace1", managedKey.getKeyNamespace());
+
+ // Second call should return the same keys since our mock key provider returns the same key
+ ManagedKeyData managedKey2 = keymetaAdmin.enableKeyManagement(CUST_BYTES, "namespace1");
+ assertEquals(managedKey, managedKey2);
+ }
+
+ @Test
+ public void testEnableKeyManagementWithMultipleNamespaces() throws Exception {
+ ManagedKeyData managedKey = keymetaAdmin.enableKeyManagement(CUST_BYTES, "namespace1");
+ assertEquals("namespace1", managedKey.getKeyNamespace());
+
+ ManagedKeyData managedKey2 = keymetaAdmin.enableKeyManagement(CUST_BYTES, "namespace2");
+ assertEquals("namespace2", managedKey2.getKeyNamespace());
+ }
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestForKeyProviderNullReturn extends TestKeymetaAdminImpl {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestForKeyProviderNullReturn.class);
+
+ @Parameter(0)
+ public String keySpace;
+
+ @Parameters(name = "{index},keySpace={0}")
+ public static Collection data() {
+ return Arrays.asList(new Object[][] { { KEY_SPACE_GLOBAL }, { "ns1" }, });
+ }
+
+ @Test
+ public void test() throws Exception {
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(conf);
+ String cust = "invalidcust1";
+ byte[] custBytes = cust.getBytes();
+ managedKeyProvider.setMockedKey(cust, null, keySpace);
+ IOException ex = assertThrows(IOException.class,
+ () -> keymetaAdmin.enableKeyManagement(custBytes, keySpace));
+ assertEquals("Invalid null managed key received from key provider", ex.getMessage());
+ }
+ }
+
+  private class KeymetaAdminImplForTest extends KeymetaAdminImpl {
+    // The accessor parameter is unused: the overrides below delegate directly to the outer
+    // class's keymetaAccessor mock. The parameter keeps construction uniform across tests.
+    public KeymetaAdminImplForTest(MasterServices mockServer, KeymetaTableAccessor mockAccessor) {
+      super(mockServer);
+    }
+
+ @Override
+ public void addKey(ManagedKeyData keyData) throws IOException {
+ keymetaAccessor.addKey(keyData);
+ }
+
+ @Override
+    public List getAllKeys(byte[] keyCust, String keyNamespace,
+      boolean includeMarkers) throws IOException, KeyException {
+      return keymetaAccessor.getAllKeys(keyCust, keyNamespace, includeMarkers);
+    }
+
+    @Override
+    public ManagedKeyData getKeyManagementStateMarker(byte[] keyCust, String keyNamespace)
+      throws IOException, KeyException {
+      return keymetaAccessor.getKeyManagementStateMarker(keyCust, keyNamespace);
+    }
+ }
+
+ protected boolean assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState,
+ Key expectedKey) {
+ assertNotNull(keyData);
+ assertEquals(expKeyState, keyData.getKeyState());
+ if (expectedKey == null) {
+ assertNull(keyData.getTheKey());
+ } else {
+ byte[] keyBytes = keyData.getTheKey().getEncoded();
+ byte[] expectedKeyBytes = expectedKey.getEncoded();
+ assertEquals(expectedKeyBytes.length, keyBytes.length);
+      assertEquals(new Bytes(expectedKeyBytes), new Bytes(keyBytes));
+ }
+ return true;
+ }
+
+ /**
+   * Test class for rotateSTK and other miscellaneous key-management admin APIs.
+ */
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestMiscAPIs extends TestKeymetaAdminImpl {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestMiscAPIs.class);
+
+ private ServerManager mockServerManager = mock(ServerManager.class);
+ private AsyncClusterConnection mockConnection;
+ private AsyncAdmin mockAsyncAdmin;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ mockConnection = mock(AsyncClusterConnection.class);
+ mockAsyncAdmin = mock(AsyncAdmin.class);
+ when(mockServer.getServerManager()).thenReturn(mockServerManager);
+ when(mockServer.getAsyncClusterConnection()).thenReturn(mockConnection);
+ when(mockConnection.getAdmin()).thenReturn(mockAsyncAdmin);
+ }
+
+ @Test
+ public void testEnableWithInactiveKey() throws Exception {
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(conf);
+ managedKeyProvider.setMockedKeyState(CUST, INACTIVE);
+ when(keymetaAccessor.getKeyManagementStateMarker(CUST.getBytes(), KEY_SPACE_GLOBAL))
+ .thenReturn(managedKeyProvider.getManagedKey(CUST.getBytes(), KEY_SPACE_GLOBAL));
+
+ IOException exception = assertThrows(IOException.class,
+ () -> keymetaAdmin.enableKeyManagement(CUST_BYTES, KEY_SPACE_GLOBAL));
+ assertTrue(exception.getMessage(),
+ exception.getMessage().contains("Expected key to be ACTIVE, but got an INACTIVE key"));
+ }
+
+ /**
+ * Helper method to test that a method throws IOException when not called on master.
+ * @param adminAction the action to test, taking a KeymetaAdminImpl instance
+ * @param expectedMessageFragment the expected fragment in the error message
+ */
+ private void assertNotOnMasterThrowsException(Consumer adminAction,
+ String expectedMessageFragment) {
+ // Create a non-master server mock
+ Server mockRegionServer = mock(Server.class);
+ KeyManagementService mockKeyService = mock(KeyManagementService.class);
+ when(mockRegionServer.getKeyManagementService()).thenReturn(mockKeyService);
+ when(mockKeyService.getConfiguration()).thenReturn(conf);
+ when(mockRegionServer.getConfiguration()).thenReturn(conf);
+ when(mockRegionServer.getFileSystem()).thenReturn(mockFileSystem);
+
+ KeymetaAdminImpl admin = new KeymetaAdminImpl(mockRegionServer) {
+ @Override
+ protected AsyncAdmin getAsyncAdmin(MasterServices master) {
+ throw new RuntimeException("Shouldn't be called since we are not on master");
+ }
+ };
+
+ RuntimeException runtimeEx =
+ assertThrows(RuntimeException.class, () -> adminAction.accept(admin));
+ assertTrue(runtimeEx.getCause() instanceof IOException);
+ IOException ex = (IOException) runtimeEx.getCause();
+ assertTrue(ex.getMessage().contains(expectedMessageFragment));
+ }
+
+ /**
+ * Helper method to test that a method throws IOException when key management is disabled.
+ * @param adminAction the action to test, taking a KeymetaAdminImpl instance
+ */
+ private void assertDisabledThrowsException(Consumer adminAction) {
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
+
+ KeymetaAdminImpl admin = new KeymetaAdminImpl(mockServer) {
+ @Override
+ protected AsyncAdmin getAsyncAdmin(MasterServices master) {
+ throw new RuntimeException("Shouldn't be called since we are disabled");
+ }
+ };
+
+ RuntimeException runtimeEx =
+ assertThrows(RuntimeException.class, () -> adminAction.accept(admin));
+ assertTrue(runtimeEx.getCause() instanceof IOException);
+ IOException ex = (IOException) runtimeEx.getCause();
+ assertTrue("Exception message should contain 'not enabled', but was: " + ex.getMessage(),
+ ex.getMessage().contains("not enabled"));
+ }
+
+    /**
+     * Test rotateSTK when a new key is detected. With rotateSystemKeyIfChanged() mockable on
+     * MasterServices, the success scenario can be tested properly:
+     * <ol>
+     * <li>rotateSystemKeyIfChanged() returns true (new key detected)</li>
+     * <li>Master gets the list of online region servers</li>
+     * <li>Master makes parallel RPC calls to all region servers</li>
+     * <li>All region servers successfully rebuild their system key cache</li>
+     * <li>Method returns true</li>
+     * </ol>
+     */
+ @Test
+ public void testRotateSTKWithNewKey() throws Exception {
+      // Setup: mock rotateSystemKeyIfChanged on MasterServices to report that a new key was
+      // detected
+      when(mockServer.rotateSystemKeyIfChanged()).thenReturn(true);
+
+ when(mockAsyncAdmin.refreshSystemKeyCacheOnServers(any()))
+ .thenReturn(CompletableFuture.completedFuture(null));
+
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+
+ // Call rotateSTK - should return true since new key was detected
+ boolean result = admin.rotateSTK();
+
+ // Verify the result
+ assertTrue("rotateSTK should return true when new key is detected", result);
+
+ // Verify that rotateSystemKeyIfChanged was called
+ verify(mockServer).rotateSystemKeyIfChanged();
+ verify(mockAsyncAdmin).refreshSystemKeyCacheOnServers(any());
+ }
+
+    /**
+     * Test rotateSTK when no key change is detected:
+     * <ol>
+     * <li>rotateSystemKeyIfChanged() returns false (no key change)</li>
+     * <li>Method returns false immediately, short-circuiting before any region server call</li>
+     * <li>No RPC calls are made to region servers</li>
+     * </ol>
+     */
+ @Test
+ public void testRotateSTKNoChange() throws Exception {
+      // Mock rotateSystemKeyIfChanged to report no key change
+ when(mockServer.rotateSystemKeyIfChanged()).thenReturn(false);
+
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+
+ // Call rotateSTK - should return false since no key change was detected
+ boolean result = admin.rotateSTK();
+
+ // Verify the result
+ assertFalse("rotateSTK should return false when no key change is detected", result);
+
+ // Verify that rotateSystemKeyIfChanged was called
+ verify(mockServer).rotateSystemKeyIfChanged();
+
+ // Verify that getOnlineServersList was never called (short-circuit behavior)
+ verify(mockServerManager, never()).getOnlineServersList();
+ }
+
+ @Test
+ public void testRotateSTKOnIOException() throws Exception {
+ when(mockServer.rotateSystemKeyIfChanged()).thenThrow(new IOException("test"));
+
+ KeymetaAdminImpl admin = new KeymetaAdminImpl(mockServer);
+ IOException ex = assertThrows(IOException.class, () -> admin.rotateSTK());
+ assertTrue("Exception message should contain 'test', but was: " + ex.getMessage(),
+ ex.getMessage().equals("test"));
+ }
+
+ /**
+ * Test rotateSTK when region server refresh fails.
+ */
+ @Test
+ public void testRotateSTKWithFailedServerRefresh() throws Exception {
+      // Setup: mock rotateSystemKeyIfChanged to report that a new key was detected
+      when(mockServer.rotateSystemKeyIfChanged()).thenReturn(true);
+
+ CompletableFuture failedFuture = new CompletableFuture<>();
+ failedFuture.completeExceptionally(new IOException("refresh failed"));
+ when(mockAsyncAdmin.refreshSystemKeyCacheOnServers(any())).thenReturn(failedFuture);
+
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+
+ // Call rotateSTK and expect IOException
+ IOException ex = assertThrows(IOException.class, () -> admin.rotateSTK());
+
+ assertTrue(ex.getMessage()
+ .contains("Failed to initiate System Key cache refresh on one or more region servers"));
+
+ // Verify that rotateSystemKeyIfChanged was called
+ verify(mockServer).rotateSystemKeyIfChanged();
+ verify(mockAsyncAdmin).refreshSystemKeyCacheOnServers(any());
+ }
+
+ @Test
+ public void testRotateSTKNotOnMaster() throws Exception {
+ assertNotOnMasterThrowsException(admin -> {
+ try {
+ admin.rotateSTK();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }, "rotateSTK can only be called on master");
+ }
+
+ @Test
+ public void testEjectManagedKeyDataCacheEntryNotOnMaster() throws Exception {
+ byte[] keyCustodian = Bytes.toBytes("testCustodian");
+ String keyNamespace = "testNamespace";
+ String keyMetadata = "testMetadata";
+
+ assertNotOnMasterThrowsException(admin -> {
+ try {
+ admin.ejectManagedKeyDataCacheEntry(keyCustodian, keyNamespace, keyMetadata);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }, "ejectManagedKeyDataCacheEntry can only be called on master");
+ }
+
+ @Test
+ public void testClearManagedKeyDataCacheNotOnMaster() throws Exception {
+ assertNotOnMasterThrowsException(admin -> {
+ try {
+ admin.clearManagedKeyDataCache();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }, "clearManagedKeyDataCache can only be called on master");
+ }
+
+ @Test
+ public void testRotateSTKWhenDisabled() throws Exception {
+ assertDisabledThrowsException(admin -> {
+ try {
+ admin.rotateSTK();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ @Test
+ public void testEjectManagedKeyDataCacheEntryWhenDisabled() throws Exception {
+ byte[] keyCustodian = Bytes.toBytes("testCustodian");
+ String keyNamespace = "testNamespace";
+ String keyMetadata = "testMetadata";
+
+ assertDisabledThrowsException(admin -> {
+ try {
+ admin.ejectManagedKeyDataCacheEntry(keyCustodian, keyNamespace, keyMetadata);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ @Test
+ public void testClearManagedKeyDataCacheWhenDisabled() throws Exception {
+ assertDisabledThrowsException(admin -> {
+ try {
+ admin.clearManagedKeyDataCache();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
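+
+    // Minimal sketch (assumed helper): the checked-to-unchecked wrapping repeated in the
+    // Consumer-based assertions above, extracted once for clarity.
+    private static void uncheckedIO(IORunnable action) {
+      try {
+        action.run();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    @FunctionalInterface
+    private interface IORunnable {
+      void run() throws IOException;
+    }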
+
+ /**
+     * Test ejectManagedKeyDataCacheEntry API - verify it delegates to the corresponding
+     * AsyncAdmin method
+ */
+ @Test
+ public void testEjectManagedKeyDataCacheEntry() throws Exception {
+ byte[] keyCustodian = Bytes.toBytes("testCustodian");
+ String keyNamespace = "testNamespace";
+ String keyMetadata = "testMetadata";
+
+ when(mockAsyncAdmin.ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(), any()))
+ .thenReturn(CompletableFuture.completedFuture(null));
+
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+
+ // Call the method
+ admin.ejectManagedKeyDataCacheEntry(keyCustodian, keyNamespace, keyMetadata);
+
+ // Verify the AsyncAdmin method was called
+ verify(mockAsyncAdmin).ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(), any());
+ }
+
+ /**
+ * Test ejectManagedKeyDataCacheEntry when it fails
+ */
+ @Test
+ public void testEjectManagedKeyDataCacheEntryWithFailure() throws Exception {
+ byte[] keyCustodian = Bytes.toBytes("testCustodian");
+ String keyNamespace = "testNamespace";
+ String keyMetadata = "testMetadata";
+
+ CompletableFuture failedFuture = new CompletableFuture<>();
+ failedFuture.completeExceptionally(new IOException("eject failed"));
+ when(mockAsyncAdmin.ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(), any()))
+ .thenReturn(failedFuture);
+
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+
+ // Call the method and expect IOException
+ IOException ex = assertThrows(IOException.class,
+ () -> admin.ejectManagedKeyDataCacheEntry(keyCustodian, keyNamespace, keyMetadata));
+
+ assertTrue(ex.getMessage().contains("eject failed"));
+ verify(mockAsyncAdmin).ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(), any());
+ }
+
+ /**
+ * Test clearManagedKeyDataCache API - verify it calls the AsyncAdmin method
+ */
+ @Test
+ public void testClearManagedKeyDataCache() throws Exception {
+ when(mockAsyncAdmin.clearManagedKeyDataCacheOnServers(any()))
+ .thenReturn(CompletableFuture.completedFuture(null));
+
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+
+ // Call the method
+ admin.clearManagedKeyDataCache();
+
+ // Verify the AsyncAdmin method was called
+ verify(mockAsyncAdmin).clearManagedKeyDataCacheOnServers(any());
+ }
+
+ /**
+ * Test clearManagedKeyDataCache when it fails
+ */
+ @Test
+ public void testClearManagedKeyDataCacheWithFailure() throws Exception {
+ CompletableFuture failedFuture = new CompletableFuture<>();
+ failedFuture.completeExceptionally(new IOException("clear failed"));
+ when(mockAsyncAdmin.clearManagedKeyDataCacheOnServers(any())).thenReturn(failedFuture);
+
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+
+ // Call the method and expect IOException
+ IOException ex = assertThrows(IOException.class, () -> admin.clearManagedKeyDataCache());
+
+ assertTrue(ex.getMessage().contains("clear failed"));
+ verify(mockAsyncAdmin).clearManagedKeyDataCacheOnServers(any());
+ }
+ }
+
+ /**
+ * Tests for new key management admin methods.
+ */
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestNewKeyManagementAdminMethods {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestNewKeyManagementAdminMethods.class);
+
+ @Mock
+ private MasterServices mockMasterServices;
+ @Mock
+ private AsyncAdmin mockAsyncAdmin;
+ @Mock
+ private AsyncClusterConnection mockAsyncClusterConnection;
+ @Mock
+ private ServerManager mockServerManager;
+ @Mock
+ private KeymetaTableAccessor mockAccessor;
+ @Mock
+ private ManagedKeyProvider mockProvider;
+ @Mock
+ private KeyManagementService mockKeyManagementService;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.openMocks(this);
+ when(mockMasterServices.getAsyncClusterConnection()).thenReturn(mockAsyncClusterConnection);
+ when(mockAsyncClusterConnection.getAdmin()).thenReturn(mockAsyncAdmin);
+ when(mockMasterServices.getServerManager()).thenReturn(mockServerManager);
+ when(mockServerManager.getOnlineServersList()).thenReturn(new ArrayList<>());
+
+ // Setup KeyManagementService mock
+ Configuration conf = HBaseConfiguration.create();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockKeyManagementService.getConfiguration()).thenReturn(conf);
+ when(mockMasterServices.getKeyManagementService()).thenReturn(mockKeyManagementService);
+ }
+
+ @Test
+ public void testDisableKeyManagement() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ ManagedKeyData activeKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+ ManagedKeyData disabledMarker =
+ new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, ManagedKeyState.DISABLED);
+
+ when(mockAccessor.getKeyManagementStateMarker(any(), any())).thenReturn(activeKey)
+ .thenReturn(disabledMarker);
+
+ ManagedKeyData result = admin.disableKeyManagement(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ assertNotNull(result);
+ assertEquals(ManagedKeyState.DISABLED, result.getKeyState());
+ verify(mockAccessor, times(2)).getKeyManagementStateMarker(CUST_BYTES, KEY_SPACE_GLOBAL);
+ verify(mockAccessor).updateActiveState(activeKey, ManagedKeyState.INACTIVE);
+
+ // Repeat the call for idempotency check.
+ clearInvocations(mockAccessor);
+ when(mockAccessor.getKeyManagementStateMarker(any(), any())).thenReturn(disabledMarker);
+ result = admin.disableKeyManagement(CUST_BYTES, KEY_SPACE_GLOBAL);
+ assertNotNull(result);
+ assertEquals(ManagedKeyState.DISABLED, result.getKeyState());
+ verify(mockAccessor, times(2)).getKeyManagementStateMarker(CUST_BYTES, KEY_SPACE_GLOBAL);
+ verify(mockAccessor, never()).updateActiveState(any(), any());
+ }
+
+ @Test
+ public void testDisableManagedKey() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ ManagedKeyData disabledKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.DISABLED, "metadata1", 123L);
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash("metadata1");
+ when(mockAccessor.getKey(any(), any(), any())).thenReturn(disabledKey);
+
+ CompletableFuture successFuture = CompletableFuture.completedFuture(null);
+ when(mockAsyncAdmin.ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(), any()))
+ .thenReturn(successFuture);
+
+ IOException exception = assertThrows(IOException.class,
+ () -> admin.disableManagedKey(CUST_BYTES, KEY_SPACE_GLOBAL, keyMetadataHash));
+ assertTrue(exception.getMessage(),
+ exception.getMessage().contains("Key is already disabled"));
+ verify(mockAccessor, never()).disableKey(any(ManagedKeyData.class));
+ }
+
+ @Test
+ public void testDisableManagedKeyNotFound() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash("metadata1");
+ // Return null to simulate key not found
+ when(mockAccessor.getKey(any(), any(), any())).thenReturn(null);
+
+ IOException exception = assertThrows(IOException.class,
+ () -> admin.disableManagedKey(CUST_BYTES, KEY_SPACE_GLOBAL, keyMetadataHash));
+ assertTrue(exception.getMessage(),
+ exception.getMessage()
+ .contains("Key not found for (custodian: Y3VzdDE=, namespace: *) with metadata hash: "
+ + ManagedKeyProvider.encodeToStr(keyMetadataHash)));
+ verify(mockAccessor).getKey(CUST_BYTES, KEY_SPACE_GLOBAL, keyMetadataHash);
+ }
+
+ @Test
+ public void testRotateManagedKeyNoActiveKey() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ // Return null to simulate no active key exists
+ when(mockAccessor.getKeyManagementStateMarker(any(), any())).thenReturn(null);
+
+ IOException exception =
+ assertThrows(IOException.class, () -> admin.rotateManagedKey(CUST_BYTES, KEY_SPACE_GLOBAL));
+ assertTrue(exception.getMessage().contains("No active key found"));
+ verify(mockAccessor).getKeyManagementStateMarker(CUST_BYTES, KEY_SPACE_GLOBAL);
+ }
+
+ @Test
+ public void testRotateManagedKey() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ ManagedKeyData currentKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+ ManagedKeyData newKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata2", 124L);
+
+ when(mockAccessor.getKeyManagementStateMarker(any(), any())).thenReturn(currentKey);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockProvider.getManagedKey(any(), any())).thenReturn(newKey);
+
+ ManagedKeyData result = admin.rotateManagedKey(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ assertNotNull(result);
+ assertEquals(newKey, result);
+ verify(mockAccessor).addKey(newKey);
+ verify(mockAccessor).updateActiveState(currentKey, ManagedKeyState.INACTIVE);
+ }
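+
+    // Verification sketch (assumed helper, local to these tests): the write pattern a
+    // successful rotation must produce, as checked piecemeal in testRotateManagedKey above.
+    private void verifySuccessfulRotationWrites(ManagedKeyData oldKey, ManagedKeyData newKey)
+      throws IOException {
+      verify(mockAccessor).addKey(newKey);
+      verify(mockAccessor).updateActiveState(oldKey, ManagedKeyState.INACTIVE);
+    }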
+
+ @Test
+ public void testRefreshManagedKeysWithNoStateChange() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ List keys = new ArrayList<>();
+ ManagedKeyData key1 = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+ keys.add(key1);
+
+ when(mockAccessor.getAllKeys(any(), any(), anyBoolean())).thenReturn(keys);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(key1);
+
+ admin.refreshManagedKeys(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ verify(mockAccessor).getAllKeys(CUST_BYTES, KEY_SPACE_GLOBAL, false);
+ verify(mockAccessor, never()).updateActiveState(any(), any());
+ verify(mockAccessor, never()).disableKey(any());
+ }
+
+ @Test
+ public void testRotateManagedKeyIgnoresFailedKey() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ ManagedKeyData currentKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+ ManagedKeyData newKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.FAILED, "metadata1", 124L);
+
+ when(mockAccessor.getKeyManagementStateMarker(any(), any())).thenReturn(currentKey);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockProvider.getManagedKey(any(), any())).thenReturn(newKey);
+ // Mock the AsyncAdmin for ejectManagedKeyDataCacheEntry
+ when(mockAsyncAdmin.ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(), any()))
+ .thenReturn(CompletableFuture.completedFuture(null));
+
+ ManagedKeyData result = admin.rotateManagedKey(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ assertNull(result);
+ // Verify that the active key was not marked as inactive
+ verify(mockAccessor, never()).addKey(any());
+ verify(mockAccessor, never()).updateActiveState(any(), any());
+ }
+
+ @Test
+ public void testRotateManagedKeyNoRotation() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ // Current and new keys have the same metadata hash, so no rotation should occur
+ ManagedKeyData currentKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+
+ when(mockAccessor.getKeyManagementStateMarker(any(), any())).thenReturn(currentKey);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockProvider.getManagedKey(any(), any())).thenReturn(currentKey);
+
+ ManagedKeyData result = admin.rotateManagedKey(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ assertNull(result);
+ verify(mockAccessor, never()).updateActiveState(any(), any());
+ verify(mockAccessor, never()).addKey(any());
+ verify(mockAsyncAdmin, never()).ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(),
+ any());
+ }
+
+ @Test
+ public void testRefreshManagedKeysWithStateChange() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ List keys = new ArrayList<>();
+ ManagedKeyData key1 = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+ keys.add(key1);
+
+ // Refreshed key has a different state (INACTIVE)
+ ManagedKeyData refreshedKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.INACTIVE, "metadata1", 123L);
+
+ when(mockAccessor.getAllKeys(any(), any(), anyBoolean())).thenReturn(keys);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(refreshedKey);
+
+ admin.refreshManagedKeys(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ verify(mockAccessor).getAllKeys(CUST_BYTES, KEY_SPACE_GLOBAL, false);
+ verify(mockAccessor).updateActiveState(key1, ManagedKeyState.INACTIVE);
+ }
+
+ @Test
+ public void testRefreshManagedKeysWithDisabledState() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ List keys = new ArrayList<>();
+ ManagedKeyData key1 = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+ keys.add(key1);
+
+ // Refreshed key is DISABLED
+ ManagedKeyData disabledKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.DISABLED, "metadata1", 123L);
+
+ when(mockAccessor.getAllKeys(any(), any(), anyBoolean())).thenReturn(keys);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockProvider.unwrapKey(any(), any())).thenReturn(disabledKey);
+ // Mock the ejectManagedKeyDataCacheEntry to cover line 263
+ when(mockAsyncAdmin.ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(), any()))
+ .thenReturn(CompletableFuture.completedFuture(null));
+
+ admin.refreshManagedKeys(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ verify(mockAccessor).getAllKeys(CUST_BYTES, KEY_SPACE_GLOBAL, false);
+ verify(mockAccessor).disableKey(key1);
+ // Verify cache ejection was called (line 263)
+ verify(mockAsyncAdmin).ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(), any());
+ }
+
+ @Test
+ public void testRefreshManagedKeysWithException() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ List keys = new ArrayList<>();
+ ManagedKeyData key1 = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+ ManagedKeyData key2 = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata2", 124L);
+ ManagedKeyData refreshedKey1 = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.INACTIVE, "metadata1", 123L);
+ keys.add(key1);
+ keys.add(key2);
+
+ when(mockAccessor.getAllKeys(any(), any(), anyBoolean())).thenReturn(keys);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ // First key throws IOException, second key should still be refreshed
+ doThrow(new IOException("Simulated error")).when(mockAccessor)
+ .updateActiveState(any(ManagedKeyData.class), any(ManagedKeyState.class));
+ when(mockProvider.unwrapKey(key1.getKeyMetadata(), null)).thenReturn(refreshedKey1);
+ when(mockProvider.unwrapKey(key2.getKeyMetadata(), null)).thenReturn(key2);
+
+      // The failure on the first key is collected and rethrown as an IOException after all
+      // keys have been attempted; the second key is still refreshed.
+ IOException exception = assertThrows(IOException.class,
+ () -> admin.refreshManagedKeys(CUST_BYTES, KEY_SPACE_GLOBAL));
+
+ assertTrue(exception.getCause() instanceof IOException);
+ assertTrue(exception.getCause().getMessage(),
+ exception.getCause().getMessage().contains("Simulated error"));
+ verify(mockAccessor).getAllKeys(CUST_BYTES, KEY_SPACE_GLOBAL, false);
+ verify(mockAccessor, never()).disableKey(any());
+ verify(mockProvider).unwrapKey(key1.getKeyMetadata(), null);
+ verify(mockProvider).unwrapKey(key2.getKeyMetadata(), null);
+ verify(mockAsyncAdmin, never()).ejectManagedKeyDataCacheEntryOnServers(any(), any(), any(),
+ any());
+ }
+
+ @Test
+ public void testRefreshKeyWithMetadataValidationFailure() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ ManagedKeyData originalKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+ // Refreshed key has different metadata (which should not happen and indicates a serious
+ // error)
+ ManagedKeyData refreshedKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata2", 124L);
+
+ List keys = Arrays.asList(originalKey);
+ when(mockAccessor.getAllKeys(any(), any(), anyBoolean())).thenReturn(keys);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockProvider.unwrapKey(originalKey.getKeyMetadata(), null)).thenReturn(refreshedKey);
+
+ // The metadata mismatch triggers a KeyException which gets wrapped in an IOException
+ IOException exception = assertThrows(IOException.class,
+ () -> admin.refreshManagedKeys(CUST_BYTES, KEY_SPACE_GLOBAL));
+ assertTrue(exception.getCause() instanceof KeyException);
+ assertTrue(exception.getCause().getMessage(),
+ exception.getCause().getMessage().contains("Key metadata changed during refresh"));
+ verify(mockProvider).unwrapKey(originalKey.getKeyMetadata(), null);
+ // No state updates should happen due to the exception
+ verify(mockAccessor, never()).updateActiveState(any(), any());
+ verify(mockAccessor, never()).disableKey(any());
+ }
+
+ @Test
+ public void testRefreshKeyWithFailedStateIgnored() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ ManagedKeyData originalKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 123L);
+      // Refreshed key is in FAILED state (provider issue)
+ ManagedKeyData failedKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.FAILED, "metadata1", 124L);
+
+ List keys = Arrays.asList(originalKey);
+ when(mockAccessor.getAllKeys(any(), any(), anyBoolean())).thenReturn(keys);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockProvider.unwrapKey(originalKey.getKeyMetadata(), null)).thenReturn(failedKey);
+
+ admin.refreshManagedKeys(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ // Should not update state when refreshed key is FAILED
+ verify(mockAccessor, never()).updateActiveState(any(), any());
+ verify(mockAccessor, never()).disableKey(any());
+ verify(mockProvider).unwrapKey(originalKey.getKeyMetadata(), null);
+ }
+
+ @Test
+ public void testRefreshKeyRecoveryFromPriorEnableFailure() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ // FAILED key with null metadata (lines 119-135 in KeyManagementUtils)
+ ManagedKeyData failedKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, FAILED, 123L);
+
+ // Provider returns a recovered key
+ ManagedKeyData recoveredKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, null,
+ ManagedKeyState.ACTIVE, "metadata1", 124L);
+
+ List keys = Arrays.asList(failedKey);
+ when(mockAccessor.getAllKeys(any(), any(), anyBoolean())).thenReturn(keys);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockAccessor.getKeyManagementStateMarker(CUST_BYTES, KEY_SPACE_GLOBAL))
+ .thenReturn(failedKey);
+ when(mockProvider.getManagedKey(failedKey.getKeyCustodian(), failedKey.getKeyNamespace()))
+ .thenReturn(recoveredKey);
+
+ admin.refreshManagedKeys(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ // Should call getManagedKey for FAILED key with null metadata (line 125)
+ verify(mockProvider).getManagedKey(failedKey.getKeyCustodian(), failedKey.getKeyNamespace());
+ // Should add recovered key (line 130)
+ verify(mockAccessor).addKey(recoveredKey);
+ }
+
+ @Test
+ public void testRefreshKeyNoRecoveryFromPriorEnableFailure() throws Exception {
+ KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockMasterServices, mockAccessor);
+
+ // FAILED key with null metadata
+ ManagedKeyData failedKey = new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, FAILED, 123L);
+
+ // Provider returns another FAILED key (recovery didn't work)
+ ManagedKeyData stillFailedKey =
+ new ManagedKeyData(CUST_BYTES, KEY_SPACE_GLOBAL, ManagedKeyState.FAILED, 124L);
+
+ List keys = Arrays.asList(failedKey);
+ when(mockAccessor.getAllKeys(any(), any(), anyBoolean())).thenReturn(keys);
+ when(mockAccessor.getKeyProvider()).thenReturn(mockProvider);
+ when(mockAccessor.getKeyManagementStateMarker(CUST_BYTES, KEY_SPACE_GLOBAL))
+ .thenReturn(failedKey);
+ when(mockProvider.getManagedKey(failedKey.getKeyCustodian(), failedKey.getKeyNamespace()))
+ .thenReturn(stillFailedKey);
+
+ admin.refreshManagedKeys(CUST_BYTES, KEY_SPACE_GLOBAL);
+
+ // Should call getManagedKey for FAILED key with null metadata
+ verify(mockProvider).getManagedKey(failedKey.getKeyCustodian(), failedKey.getKeyNamespace());
+ verify(mockAccessor, never()).addKey(any());
+ }
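+
+    // Dispatch sketch (illustrative only; the real logic lives in KeymetaAdminImpl and
+    // KeyManagementUtils): how each refreshed key state maps to an accessor action in the
+    // refresh scenarios above.
+    private void applyRefreshedState(ManagedKeyData stored, ManagedKeyData refreshed)
+      throws IOException {
+      switch (refreshed.getKeyState()) {
+        case INACTIVE:
+          // Provider demoted the key: persist the state change.
+          mockAccessor.updateActiveState(stored, ManagedKeyState.INACTIVE);
+          break;
+        case DISABLED:
+          // Provider disabled the key: persist it; the real code also ejects cluster caches.
+          mockAccessor.disableKey(stored);
+          break;
+        case FAILED:
+          // Provider-side failure: leave the stored key untouched.
+          break;
+        default:
+          break;
+      }
+    }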
+
+ private class KeymetaAdminImplForTest extends KeymetaAdminImpl {
+ private final KeymetaTableAccessor accessor;
+
+ public KeymetaAdminImplForTest(MasterServices server, KeymetaTableAccessor accessor)
+ throws IOException {
+ super(server);
+ this.accessor = accessor;
+ }
+
+ @Override
+ protected AsyncAdmin getAsyncAdmin(MasterServices master) {
+ return mockAsyncAdmin;
+ }
+
+ @Override
+ public List getAllKeys(byte[] keyCust, String keyNamespace,
+ boolean includeMarkers) throws IOException, KeyException {
+ return accessor.getAllKeys(keyCust, keyNamespace, includeMarkers);
+ }
+
+ @Override
+ public ManagedKeyData getKey(byte[] keyCust, String keyNamespace, byte[] keyMetadataHash)
+ throws IOException, KeyException {
+ return accessor.getKey(keyCust, keyNamespace, keyMetadataHash);
+ }
+
+ @Override
+ public void disableKey(ManagedKeyData keyData) throws IOException {
+ accessor.disableKey(keyData);
+ }
+
+ @Override
+ public ManagedKeyData getKeyManagementStateMarker(byte[] keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ return accessor.getKeyManagementStateMarker(keyCust, keyNamespace);
+ }
+
+ @Override
+ public void addKeyManagementStateMarker(byte[] keyCust, String keyNamespace,
+ ManagedKeyState state) throws IOException {
+ accessor.addKeyManagementStateMarker(keyCust, keyNamespace, state);
+ }
+
+ @Override
+ public ManagedKeyProvider getKeyProvider() {
+ return accessor.getKeyProvider();
+ }
+
+ @Override
+ public void addKey(ManagedKeyData keyData) throws IOException {
+ accessor.addKey(keyData);
+ }
+
+ @Override
+ public void updateActiveState(ManagedKeyData keyData, ManagedKeyState newState)
+ throws IOException {
+ accessor.updateActiveState(keyData, newState);
+ }
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 5e6b8db58243..71b24ecc954a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.StartTestingClusterOption;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.FlakeyTests;
@@ -40,10 +41,16 @@
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Suite;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Category({ FlakeyTests.class, LargeTests.class })
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestMasterFailover.TestMasterFailoverDefaultConfig.class,
+ TestMasterFailover.TestSimpleMasterFailoverWithKeymeta.class })
public class TestMasterFailover {
@ClassRule
@@ -54,19 +61,11 @@ public class TestMasterFailover {
@Rule
public TestName name = new TestName();
- /**
- * Simple test of master failover.
- *
- * Starts with three masters. Kills a backup master. Then kills the active master. Ensures the
- * final master becomes active and we can still contact the cluster.
- */
- @Test
- public void testSimpleMasterFailover() throws Exception {
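+ /** Shared failover scenario, reused by both suite member classes below. */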
+ protected static void doTestSimpleMasterFailover(HBaseTestingUtil TEST_UTIL) throws Exception {
final int NUM_MASTERS = 3;
final int NUM_RS = 3;
// Start the cluster
- HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
try {
StartTestingClusterOption option = StartTestingClusterOption.builder().numMasters(NUM_MASTERS)
.numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
@@ -168,50 +167,90 @@ public void testSimpleMasterFailover() throws Exception {
}
}
- /**
- * Test meta in transition when master failover. This test used to manipulate region state up in
- * zk. That is not allowed any more in hbase2 so I removed that messing. That makes this test
- * anemic.
- */
- @Test
- public void testMetaInTransitionWhenMasterFailover() throws Exception {
- // Start the cluster
- HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
- TEST_UTIL.startMiniCluster();
- try {
- SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
- LOG.info("Cluster started");
-
- HMaster activeMaster = cluster.getMaster();
- ServerName metaServerName = cluster.getServerHoldingMeta();
- HRegionServer hrs = cluster.getRegionServer(metaServerName);
-
- // Now kill master, meta should remain on rs, where we placed it before.
- LOG.info("Aborting master");
- activeMaster.abort("test-kill");
- cluster.waitForMasterToStop(activeMaster.getServerName(), 30000);
- LOG.info("Master has aborted");
-
- // meta should remain where it was
- RegionState metaState = MetaTableLocator.getMetaRegionState(hrs.getZooKeeper());
- assertEquals("hbase:meta should be online on RS", metaState.getServerName(), metaServerName);
- assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());
-
- // Start up a new master
- LOG.info("Starting up a new master");
- activeMaster = cluster.startMaster().getMaster();
- LOG.info("Waiting for master to be ready");
- cluster.waitForActiveAndReadyMaster();
- LOG.info("Master is ready");
-
- // ensure meta is still deployed on RS
- metaState = MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper());
- assertEquals("hbase:meta should be online on RS", metaState.getServerName(), metaServerName);
- assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());
-
- // Done, shutdown the cluster
- } finally {
- TEST_UTIL.shutdownMiniCluster();
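+ /** Runs the failover scenarios against a cluster with the default configuration. */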
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ FlakeyTests.class, LargeTests.class })
+ public static class TestMasterFailoverDefaultConfig {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestMasterFailoverDefaultConfig.class);
+
+ /**
+ * Simple test of master failover.
+ *
+ * Starts with three masters. Kills a backup master. Then kills the active master. Ensures the
+ * final master becomes active and we can still contact the cluster.
+ */
+ @Test
+ public void testSimpleMasterFailover() throws Exception {
+ HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+ doTestSimpleMasterFailover(TEST_UTIL);
+ }
+
+ /**
+ * Test meta in transition when master failover. This test used to manipulate region state up in
+ * zk. That is not allowed any more in hbase2 so I removed that messing. That makes this test
+ * anemic.
+ */
+ @Test
+ public void testMetaInTransitionWhenMasterFailover() throws Exception {
+ // Start the cluster
+ HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+ TEST_UTIL.startMiniCluster();
+ try {
+ SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+ LOG.info("Cluster started");
+
+ HMaster activeMaster = cluster.getMaster();
+ ServerName metaServerName = cluster.getServerHoldingMeta();
+ HRegionServer hrs = cluster.getRegionServer(metaServerName);
+
+ // Now kill master, meta should remain on rs, where we placed it before.
+ LOG.info("Aborting master");
+ activeMaster.abort("test-kill");
+ cluster.waitForMasterToStop(activeMaster.getServerName(), 30000);
+ LOG.info("Master has aborted");
+
+ // meta should remain where it was
+ RegionState metaState = MetaTableLocator.getMetaRegionState(hrs.getZooKeeper());
+ assertEquals("hbase:meta should be online on RS", metaState.getServerName(),
+ metaServerName);
+ assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());
+
+ // Start up a new master
+ LOG.info("Starting up a new master");
+ activeMaster = cluster.startMaster().getMaster();
+ LOG.info("Waiting for master to be ready");
+ cluster.waitForActiveAndReadyMaster();
+ LOG.info("Master is ready");
+
+ // ensure meta is still deployed on RS
+ metaState = MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper());
+ assertEquals("hbase:meta should be online on RS", metaState.getServerName(),
+ metaServerName);
+ assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());
+
+ // Done, shutdown the cluster
+ } finally {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+ }
+ }
+
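+ /** Re-runs the simple failover scenario with managed key metadata (keymeta) enabled. */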
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ FlakeyTests.class, LargeTests.class })
+ public static class TestSimpleMasterFailoverWithKeymeta extends ManagedKeyTestBase {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSimpleMasterFailoverWithKeymeta.class);
+
+ @Test
+ public void testSimpleMasterFailoverWithKeymeta() throws Exception {
+ doTestSimpleMasterFailover(TEST_UTIL);
+ }
+
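+ // doTestSimpleMasterFailover starts its own mini cluster, so skip the base class startup.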
+ @Override
+ protected boolean isWithMiniClusterStart() {
+ return false;
}
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
new file mode 100644
index 000000000000..09e409b11e7d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
@@ -0,0 +1,523 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Key;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.IntStream;
+import javax.crypto.spec.SecretKeySpec;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runners.Suite;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
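+/** Suite of focused member classes sharing the mock-based setup in this outer base class. */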
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestSystemKeyAccessorAndManager.TestAccessorWhenDisabled.class,
+ TestSystemKeyAccessorAndManager.TestManagerWhenDisabled.class,
+ TestSystemKeyAccessorAndManager.TestAccessor.class,
+ TestSystemKeyAccessorAndManager.TestForInvalidFilenames.class,
+ TestSystemKeyAccessorAndManager.TestManagerForErrors.class,
+ TestSystemKeyAccessorAndManager.TestAccessorMisc.class
+})
+@Category({ MasterTests.class, SmallTests.class })
+public class TestSystemKeyAccessorAndManager {
+ private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+ @Rule
+ public TestName name = new TestName();
+
+ protected Configuration conf;
+ protected Path testRootDir;
+ protected FileSystem fs;
+
+ protected FileSystem mockFileSystem = mock(FileSystem.class);
+ protected MasterServices mockMaster = mock(MasterServices.class);
+ protected SystemKeyManager systemKeyManager;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ testRootDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+ fs = testRootDir.getFileSystem(conf);
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+
+ when(mockMaster.getFileSystem()).thenReturn(mockFileSystem);
+ when(mockMaster.getConfiguration()).thenReturn(conf);
+ systemKeyManager = new SystemKeyManager(mockMaster);
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAccessorWhenDisabled extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAccessorWhenDisabled.class);
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
+ }
+
+ @Test
+ public void test() throws Exception {
+ assertThrows(IOException.class, () -> systemKeyManager.getAllSystemKeyFiles());
+ assertThrows(IOException.class, () -> systemKeyManager.getLatestSystemKeyFile().getFirst());
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestManagerWhenDisabled extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagerWhenDisabled.class);
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
+ }
+
+ @Test
+ public void test() throws Exception {
+ systemKeyManager.ensureSystemKeyInitialized();
+ assertNull(systemKeyManager.rotateSystemKeyIfChanged());
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAccessor extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAccessor.class);
+
+ @Test
+ public void testGetLatestWithNone() throws Exception {
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
+
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> systemKeyManager.getLatestSystemKeyFile());
+ assertEquals("No cluster key initialized yet", ex.getMessage());
+ }
+
+ @Test
+ public void testGetWithSingle() throws Exception {
+ String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
+ FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName);
+
+ Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
+ when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))))
+ .thenReturn(new FileStatus[] { mockFileStatus });
+
+ List<Path> files = systemKeyManager.getAllSystemKeyFiles();
+ assertEquals(1, files.size());
+ assertEquals(fileName, files.get(0).getName());
+
+ Pair<Path, List<Path>> latestSystemKeyFileResult = systemKeyManager.getLatestSystemKeyFile();
+ assertEquals(fileName, latestSystemKeyFileResult.getFirst().getName());
+
+ assertEquals(1,
+ SystemKeyAccessor.extractSystemKeySeqNum(latestSystemKeyFileResult.getFirst()));
+ }
+
+ @Test
+ public void testGetWithMultiple() throws Exception {
+ FileStatus[] mockFileStatuses = IntStream.rangeClosed(1, 3)
+ .mapToObj(i -> KeymetaTestUtils.createMockFile(SYSTEM_KEY_FILE_PREFIX + i))
+ .toArray(FileStatus[]::new);
+
+ Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
+ when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))))
+ .thenReturn(mockFileStatuses);
+
+ List<Path> files = systemKeyManager.getAllSystemKeyFiles();
+ assertEquals(3, files.size());
+
+ Pair<Path, List<Path>> latestSystemKeyFileResult = systemKeyManager.getLatestSystemKeyFile();
+ assertEquals(3,
+ SystemKeyAccessor.extractSystemKeySeqNum(latestSystemKeyFileResult.getFirst()));
+ }
+
+ @Test
+ public void testExtractKeySequenceForInvalidFilename() throws Exception {
+ assertEquals(-1,
+ SystemKeyAccessor.extractKeySequence(KeymetaTestUtils.createMockFile("abcd").getPath()));
+ }
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestForInvalidFilenames extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestForInvalidFilenames.class);
+
+ @Parameter(0)
+ public String fileName;
+ @Parameter(1)
+ public String expectedErrorMessage;
+
+ @Parameters(name = "{index},fileName={0}")
+ public static Collection<Object[]> data() {
+ return Arrays.asList(new Object[][] { { "abcd", "Couldn't parse key file name: abcd" },
+ { SYSTEM_KEY_FILE_PREFIX + "abcd",
+ "Couldn't parse key file name: " + SYSTEM_KEY_FILE_PREFIX + "abcd" },
+ // Add more test cases here
+ });
+ }
+
+ @Test
+ public void test() throws Exception {
+ FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName);
+
+ IOException ex = assertThrows(IOException.class,
+ () -> SystemKeyAccessor.extractSystemKeySeqNum(mockFileStatus.getPath()));
+ assertEquals(expectedErrorMessage, ex.getMessage());
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestManagerForErrors extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagerForErrors.class);
+
+ private static final String CLUSTER_ID = "clusterId";
+
+ @Mock
+ ManagedKeyProvider mockKeyProvider;
+ @Mock
+ MasterFileSystem masterFS;
+
+ private MockSystemKeyManager manager;
+ private AutoCloseable closeableMocks;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ closeableMocks = MockitoAnnotations.openMocks(this);
+
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
+ ClusterId clusterId = mock(ClusterId.class);
+ when(mockMaster.getMasterFileSystem()).thenReturn(masterFS);
+ when(masterFS.getClusterId()).thenReturn(clusterId);
+ when(clusterId.toString()).thenReturn(CLUSTER_ID);
+ when(masterFS.getFileSystem()).thenReturn(mockFileSystem);
+
+ manager = new MockSystemKeyManager(mockMaster, mockKeyProvider);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ closeableMocks.close();
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_WithNoSystemKeys() throws Exception {
+ when(mockKeyProvider.getSystemKey(any())).thenReturn(null);
+
+ IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized);
+ assertEquals("Failed to get system key for cluster id: " + CLUSTER_ID, ex.getMessage());
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_WithNoNonActiveKey() throws Exception {
+ String metadata = "key-metadata";
+ ManagedKeyData keyData = mock(ManagedKeyData.class);
+ when(keyData.getKeyState()).thenReturn(INACTIVE);
+ when(keyData.getKeyMetadata()).thenReturn(metadata);
+ when(mockKeyProvider.getSystemKey(any())).thenReturn(keyData);
+
+ IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized);
+ assertEquals(
+ "System key is expected to be ACTIVE but it is: INACTIVE for metadata: " + metadata,
+ ex.getMessage());
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_WithInvalidMetadata() throws Exception {
+ ManagedKeyData keyData = mock(ManagedKeyData.class);
+ when(keyData.getKeyState()).thenReturn(ACTIVE);
+ when(mockKeyProvider.getSystemKey(any())).thenReturn(keyData);
+
+ IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized);
+ assertEquals("System key is expected to have metadata but it is null", ex.getMessage());
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_WithSaveFailure() throws Exception {
+ String metadata = "key-metadata";
+ ManagedKeyData keyData = mock(ManagedKeyData.class);
+ when(keyData.getKeyState()).thenReturn(ACTIVE);
+ when(mockKeyProvider.getSystemKey(any())).thenReturn(keyData);
+ when(keyData.getKeyMetadata()).thenReturn(metadata);
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
+ Path rootDir = CommonFSUtils.getRootDir(conf);
+ when(masterFS.getTempDir()).thenReturn(rootDir);
+ FSDataOutputStream mockStream = mock(FSDataOutputStream.class);
+ when(mockFileSystem.create(any())).thenReturn(mockStream);
+ when(mockFileSystem.rename(any(), any())).thenReturn(false);
+
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, manager::ensureSystemKeyInitialized);
+ assertEquals("Failed to generate or save System Key", ex.getMessage());
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_RaceCondition() throws Exception {
+ String metadata = "key-metadata";
+ ManagedKeyData keyData = mock(ManagedKeyData.class);
+ when(keyData.getKeyState()).thenReturn(ACTIVE);
+ when(mockKeyProvider.getSystemKey(any())).thenReturn(keyData);
+ when(keyData.getKeyMetadata()).thenReturn(metadata);
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
+ Path rootDir = CommonFSUtils.getRootDir(conf);
+ when(masterFS.getTempDir()).thenReturn(rootDir);
+ FSDataOutputStream mockStream = mock(FSDataOutputStream.class);
+ when(mockFileSystem.create(any())).thenReturn(mockStream);
+ when(mockFileSystem.rename(any(), any())).thenReturn(false);
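+ // Simulate losing the save race: the rename fails, but a re-scan finds a key file written
+ // by another master, so initialization should still succeed.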
+ String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
+ FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName);
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0],
+ new FileStatus[] { mockFileStatus });
+
+ manager.ensureSystemKeyInitialized();
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAccessorMisc extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAccessorMisc.class);
+
+ @Test
+ public void testLoadSystemKeySuccess() throws Exception {
+ Path testPath = new Path("/test/key/path");
+ String testMetadata = "test-metadata";
+
+ // Create test key data
+ Key testKey = new SecretKeySpec("test-key-bytes".getBytes(), "AES");
+ ManagedKeyData testKeyData = new ManagedKeyData("custodian".getBytes(), "namespace", testKey,
+ ManagedKeyState.ACTIVE, testMetadata, 1000L);
+
+ // Mock key provider
+ ManagedKeyProvider realProvider = mock(ManagedKeyProvider.class);
+ when(realProvider.unwrapKey(testMetadata, null)).thenReturn(testKeyData);
+
+ // Create testable SystemKeyAccessor that overrides both loadKeyMetadata and getKeyProvider
+ SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) {
+ @Override
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ assertEquals(testPath, keyPath);
+ return testMetadata;
+ }
+
+ @Override
+ public ManagedKeyProvider getKeyProvider() {
+ return realProvider;
+ }
+ };
+
+ ManagedKeyData result = testAccessor.loadSystemKey(testPath);
+ assertEquals(testKeyData, result);
+
+ // Verify the key provider was called correctly
+ verify(realProvider).unwrapKey(testMetadata, null);
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testLoadSystemKeyNullResult() throws Exception {
+ Path testPath = new Path("/test/key/path");
+ String testMetadata = "test-metadata";
+
+ // Mock key provider to return null
+ ManagedKeyProvider realProvider = mock(ManagedKeyProvider.class);
+ when(realProvider.unwrapKey(testMetadata, null)).thenReturn(null);
+
+ SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) {
+ @Override
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ assertEquals(testPath, keyPath);
+ return testMetadata;
+ }
+
+ @Override
+ public ManagedKeyProvider getKeyProvider() {
+ return realProvider;
+ }
+ };
+
+ testAccessor.loadSystemKey(testPath);
+ }
+
+ @Test
+ public void testExtractSystemKeySeqNumValid() throws Exception {
+ Path testPath1 = new Path(SYSTEM_KEY_FILE_PREFIX + "1");
+ Path testPath123 = new Path(SYSTEM_KEY_FILE_PREFIX + "123");
+ Path testPathMax = new Path(SYSTEM_KEY_FILE_PREFIX + Integer.MAX_VALUE);
+
+ assertEquals(1, SystemKeyAccessor.extractSystemKeySeqNum(testPath1));
+ assertEquals(123, SystemKeyAccessor.extractSystemKeySeqNum(testPath123));
+ assertEquals(Integer.MAX_VALUE, SystemKeyAccessor.extractSystemKeySeqNum(testPathMax));
+ }
+
+ @Test(expected = IOException.class)
+ public void testGetAllSystemKeyFilesIOException() throws Exception {
+ when(mockFileSystem.globStatus(any())).thenThrow(new IOException("Filesystem error"));
+ systemKeyManager.getAllSystemKeyFiles();
+ }
+
+ @Test(expected = IOException.class)
+ public void testLoadSystemKeyIOExceptionFromMetadata() throws Exception {
+ Path testPath = new Path("/test/key/path");
+
+ SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) {
+ @Override
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ assertEquals(testPath, keyPath);
+ throw new IOException("Metadata read failed");
+ }
+
+ @Override
+ public ManagedKeyProvider getKeyProvider() {
+ return mock(ManagedKeyProvider.class);
+ }
+ };
+
+ testAccessor.loadSystemKey(testPath);
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testLoadSystemKeyProviderException() throws Exception {
+ Path testPath = new Path("/test/key/path");
+ String testMetadata = "test-metadata";
+
+ SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) {
+ @Override
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ assertEquals(testPath, keyPath);
+ return testMetadata;
+ }
+
+ @Override
+ public ManagedKeyProvider getKeyProvider() {
+ throw new RuntimeException("Key provider not available");
+ }
+ };
+
+ testAccessor.loadSystemKey(testPath);
+ }
+
+ @Test
+ public void testExtractSystemKeySeqNumBoundaryValues() throws Exception {
+ // Test boundary values
+ Path testPath0 = new Path(SYSTEM_KEY_FILE_PREFIX + "0");
+ Path testPathMin = new Path(SYSTEM_KEY_FILE_PREFIX + Integer.MIN_VALUE);
+
+ assertEquals(0, SystemKeyAccessor.extractSystemKeySeqNum(testPath0));
+ assertEquals(Integer.MIN_VALUE, SystemKeyAccessor.extractSystemKeySeqNum(testPathMin));
+ }
+
+ @Test
+ public void testExtractKeySequenceEdgeCases() throws Exception {
+ // Test various edge cases for extractKeySequence
+ Path validZero = new Path(SYSTEM_KEY_FILE_PREFIX + "0");
+ Path validNegative = new Path(SYSTEM_KEY_FILE_PREFIX + "-1");
+
+ // Valid cases should still work
+ assertEquals(0, SystemKeyAccessor.extractKeySequence(validZero));
+ assertEquals(-1, SystemKeyAccessor.extractKeySequence(validNegative));
+ }
+ }
+
+ private static class MockSystemKeyManager extends SystemKeyManager {
+ private final ManagedKeyProvider keyProvider;
+
+ public MockSystemKeyManager(MasterServices master, ManagedKeyProvider keyProvider)
+ throws IOException {
+ super(master);
+ this.keyProvider = keyProvider;
+ }
+
+ @Override
+ public ManagedKeyProvider getKeyProvider() {
+ return keyProvider;
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
new file mode 100644
index 000000000000..54bfb5e0a120
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.security.Key;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase;
+import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestSystemKeyManager extends ManagedKeyTestBase {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSystemKeyManager.class);
+
+ @Test
+ public void testSystemKeyInitializationAndRotation() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ ManagedKeyProvider keyProvider = Encryption.getManagedKeyProvider(master.getConfiguration());
+ assertNotNull(keyProvider);
+ assertTrue(keyProvider instanceof MockManagedKeyProvider);
+ MockManagedKeyProvider pbeKeyProvider = (MockManagedKeyProvider) keyProvider;
+ ManagedKeyData initialSystemKey = validateInitialState(master, pbeKeyProvider);
+
+ restartSystem();
+ master = TEST_UTIL.getHBaseCluster().getMaster();
+ validateInitialState(master, pbeKeyProvider);
+
+ // Test rotation of cluster key by changing the key that the key provider provides and restart
+ // master.
+ String newAlias = "new_cluster_key";
+ pbeKeyProvider.setClusterKeyAlias(newAlias);
+ Key newClusterKey = MockManagedKeyProvider.generateSecretKey();
+ pbeKeyProvider.setMockedKey(newAlias, newClusterKey, ManagedKeyData.KEY_SPACE_GLOBAL);
+
+ restartSystem();
+ master = TEST_UTIL.getHBaseCluster().getMaster();
+ SystemKeyAccessor systemKeyAccessor = new SystemKeyAccessor(master);
+ assertEquals(2, systemKeyAccessor.getAllSystemKeyFiles().size());
+ SystemKeyCache systemKeyCache = master.getSystemKeyCache();
+ assertEquals(0, Bytes.compareTo(newClusterKey.getEncoded(),
+ systemKeyCache.getLatestSystemKey().getTheKey().getEncoded()));
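+ // The pre-rotation key must still be loadable from its file and resolvable by checksum.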
+ assertEquals(initialSystemKey,
+ systemKeyAccessor.loadSystemKey(systemKeyAccessor.getAllSystemKeyFiles().get(1)));
+ assertEquals(initialSystemKey,
+ systemKeyCache.getSystemKeyByChecksum(initialSystemKey.getKeyChecksum()));
+ }
+
+ @Test
+ public void testWithInvalidSystemKey() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ ManagedKeyProvider keyProvider = Encryption.getManagedKeyProvider(master.getConfiguration());
+ MockManagedKeyProvider pbeKeyProvider = (MockManagedKeyProvider) keyProvider;
+
+ // Test startup failure when the cluster key is INACTIVE
+ SystemKeyManager tmpCKM = new SystemKeyManager(master);
+ tmpCKM.ensureSystemKeyInitialized();
+ pbeKeyProvider.setMockedKeyState(pbeKeyProvider.getSystemKeyAlias(), ManagedKeyState.INACTIVE);
+ assertThrows(IOException.class, tmpCKM::ensureSystemKeyInitialized);
+ }
+
+ private ManagedKeyData validateInitialState(HMaster master, MockManagedKeyProvider pbeKeyProvider)
+ throws IOException {
+ SystemKeyAccessor systemKeyAccessor = new SystemKeyAccessor(master);
+ assertEquals(1, systemKeyAccessor.getAllSystemKeyFiles().size());
+ SystemKeyCache systemKeyCache = master.getSystemKeyCache();
+ assertNotNull(systemKeyCache);
+ ManagedKeyData clusterKey = systemKeyCache.getLatestSystemKey();
+ assertEquals(pbeKeyProvider.getSystemKey(master.getClusterId().getBytes()), clusterKey);
+ assertEquals(clusterKey, systemKeyCache.getSystemKeyByChecksum(clusterKey.getKeyChecksum()));
+ return clusterKey;
+ }
+
+ private void restartSystem() throws Exception {
+ TEST_UTIL.shutdownMiniHBaseCluster();
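+ // Give the shutdown a moment to settle before restarting the cluster.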
+ Thread.sleep(2000);
+ TEST_UTIL.restartHBaseCluster(1);
+ TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index da1bc04d7e03..ab99c55e6255 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
@@ -223,5 +224,10 @@ public FileSystem getFileSystem() {
throw new UncheckedIOException(e);
}
}
+
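+ // Key management is not exercised by this test, so a null service is sufficient.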
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
index 0526fd3ba70c..9ea11f732310 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
@@ -26,12 +26,12 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
@@ -53,7 +53,7 @@ public class MasterRegionTestBase {
protected DirScanPool logCleanerPool;
- protected Server server;
+ protected MasterServices server;
protected static byte[] CF1 = Bytes.toBytes("f1");
@@ -96,7 +96,7 @@ protected final void createMasterRegion() throws IOException {
choreService = new ChoreService(getClass().getSimpleName());
hfileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
- server = mock(Server.class);
+ server = mock(MasterServices.class);
when(server.getConfiguration()).thenReturn(conf);
when(server.getServerName())
.thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
index 8f11cc415058..80792d4b276d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
@@ -40,7 +40,6 @@
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -49,6 +48,7 @@
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
@@ -119,7 +119,7 @@ public static void tearDown() throws IOException {
}
private MasterRegion createMasterRegion(ServerName serverName) throws IOException {
- Server server = mock(Server.class);
+ MasterServices server = mock(MasterServices.class);
when(server.getConfiguration()).thenReturn(HFILE_UTIL.getConfiguration());
when(server.getServerName()).thenReturn(serverName);
MasterRegionParams params = new MasterRegionParams();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
index 3c55696080e3..779ca4dac6c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
+import org.apache.hadoop.hbase.master.MockNoopMasterServices;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStorePerformanceEvaluation;
@@ -31,26 +32,18 @@
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.MockServer;
import org.apache.hadoop.hbase.util.Pair;
public class RegionProcedureStorePerformanceEvaluation
extends ProcedureStorePerformanceEvaluation {
- private static final class DummyServer extends MockServer {
-
- private final Configuration conf;
+ private static final class DummyServer extends MockNoopMasterServices {
private final ServerName serverName =
ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime());
public DummyServer(Configuration conf) {
- this.conf = conf;
- }
-
- @Override
- public Configuration getConfiguration() {
- return conf;
+ super(conf);
}
@Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java
index c05eb9a8ce3e..dac4cc1e0e73 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
-import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter;
@@ -51,7 +51,7 @@ public void setUp() throws IOException {
conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
Path testDir = htu.getDataTestDir();
CommonFSUtils.setRootDir(htu.getConfiguration(), testDir);
- Server server = RegionProcedureStoreTestHelper.mockServer(conf);
+ MasterServices server = RegionProcedureStoreTestHelper.mockServer(conf);
region = MasterRegionFactory.create(server);
store = RegionProcedureStoreTestHelper.createStore(server, region, new LoadCounter());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
index 0607d9d3e924..cc90d6e22b61 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.procedure2.store.LeaseRecovery;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader;
@@ -36,8 +37,8 @@ final class RegionProcedureStoreTestHelper {
private RegionProcedureStoreTestHelper() {
}
- static Server mockServer(Configuration conf) {
- Server server = mock(Server.class);
+ static MasterServices mockServer(Configuration conf) {
+ MasterServices server = mock(MasterServices.class);
when(server.getConfiguration()).thenReturn(conf);
when(server.getServerName())
.thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
index 7a6fee5f314c..70b93487c12b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
@@ -35,9 +35,9 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.assignment.AssignProcedure;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
@@ -66,7 +66,7 @@ public class TestRegionProcedureStoreMigration {
private HBaseCommonTestingUtil htu;
- private Server server;
+ private MasterServices server;
private MasterRegion region;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 443019bee808..9b6a5d80c9ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -856,6 +857,11 @@ public Connection createConnection(Configuration conf) throws IOException {
public AsyncClusterConnection getAsyncClusterConnection() {
return null;
}
+
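+ // This test does not touch key management; returning null keeps the stub minimal.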
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
static class CustomHeapMemoryTuner implements HeapMemoryTuner {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java
index ca7e20f5869d..9efce81d9573 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java
@@ -18,16 +18,32 @@
package org.apache.hadoop.hbase.regionserver;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Optional;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -35,6 +51,15 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BooleanMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyEntryRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ManagedKeyRequest;
+
/**
* Test parts of {@link RSRpcServices}
*/
@@ -69,4 +94,294 @@ public void testRegionScannerHolderToString() throws UnknownHostException {
null, null, false, false, clientIpAndPort, userNameTest);
LOG.info("rsh: {}", rsh);
}
+
+ /**
+ * Test the refreshSystemKeyCache RPC method that is used to rebuild the system key cache on
+ * region servers when a system key rotation has occurred.
+ */
+ @Test
+ public void testRefreshSystemKeyCache() throws Exception {
+ // Create mocks
+ HRegionServer mockServer = mock(HRegionServer.class);
+ Configuration conf = HBaseConfiguration.create();
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ when(mockServer.isOnline()).thenReturn(true);
+ when(mockServer.isAborted()).thenReturn(false);
+ when(mockServer.isStopped()).thenReturn(false);
+ when(mockServer.isDataFileSystemOk()).thenReturn(true);
+ when(mockServer.getFileSystem()).thenReturn(mockFs);
+
+ // Create RSRpcServices
+ RSRpcServices rpcServices = new RSRpcServices(mockServer);
+
+ // Create request
+ EmptyMsg request = EmptyMsg.getDefaultInstance();
+ RpcController controller = mock(RpcController.class);
+
+ // Call the RPC method
+ EmptyMsg response = rpcServices.refreshSystemKeyCache(controller, request);
+
+ // Verify the response is not null
+ assertNotNull("Response should not be null", response);
+
+ // Verify that rebuildSystemKeyCache was called on the server
+ verify(mockServer).rebuildSystemKeyCache();
+
+ LOG.info("refreshSystemKeyCache test completed successfully");
+ }
+
+ /**
+ * Test that refreshSystemKeyCache throws ServiceException when the server is stopped.
+ */
+ @Test
+ public void testRefreshSystemKeyCacheWhenServerStopped() throws Exception {
+ // Create mocks
+ HRegionServer mockServer = mock(HRegionServer.class);
+ Configuration conf = HBaseConfiguration.create();
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ when(mockServer.isOnline()).thenReturn(true);
+ when(mockServer.isAborted()).thenReturn(false);
+ when(mockServer.isStopped()).thenReturn(true); // Server is stopped
+ when(mockServer.isDataFileSystemOk()).thenReturn(true);
+ when(mockServer.getFileSystem()).thenReturn(mockFs);
+
+ // Create RSRpcServices
+ RSRpcServices rpcServices = new RSRpcServices(mockServer);
+
+ // Create request
+ EmptyMsg request = EmptyMsg.getDefaultInstance();
+ RpcController controller = mock(RpcController.class);
+
+ // Call the RPC method and expect ServiceException
+ try {
+ rpcServices.refreshSystemKeyCache(controller, request);
+ fail("Expected ServiceException when server is stopped");
+ } catch (ServiceException e) {
+ // Expected
+ assertTrue("Exception should mention server stopping",
+ e.getCause().getMessage().contains("stopping"));
+ LOG.info("Correctly threw ServiceException when server is stopped");
+ }
+ }
+
+ /**
+ * Test that refreshSystemKeyCache throws ServiceException when rebuildSystemKeyCache fails
+ */
+ @Test
+ public void testRefreshSystemKeyCacheWhenRebuildFails() throws Exception {
+ // Create mocks
+ HRegionServer mockServer = mock(HRegionServer.class);
+ Configuration conf = HBaseConfiguration.create();
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ when(mockServer.isOnline()).thenReturn(true);
+ when(mockServer.isAborted()).thenReturn(false);
+ when(mockServer.isStopped()).thenReturn(false);
+ when(mockServer.isDataFileSystemOk()).thenReturn(true);
+ when(mockServer.getFileSystem()).thenReturn(mockFs);
+
+ // Make rebuildSystemKeyCache throw IOException
+ IOException testException = new IOException("Test failure rebuilding cache");
+ doThrow(testException).when(mockServer).rebuildSystemKeyCache();
+
+ // Create RSRpcServices
+ RSRpcServices rpcServices = new RSRpcServices(mockServer);
+
+ // Create request
+ EmptyMsg request = EmptyMsg.getDefaultInstance();
+ RpcController controller = mock(RpcController.class);
+
+ // Call the RPC method and expect ServiceException
+ try {
+ rpcServices.refreshSystemKeyCache(controller, request);
+ fail("Expected ServiceException when rebuildSystemKeyCache fails");
+ } catch (ServiceException e) {
+ // Expected
+ assertEquals("Test failure rebuilding cache", e.getCause().getMessage());
+ LOG.info("Correctly threw ServiceException when rebuildSystemKeyCache fails");
+ }
+
+ // Verify that rebuildSystemKeyCache was called
+ verify(mockServer).rebuildSystemKeyCache();
+ }
+
+ /**
+ * Test the ejectManagedKeyDataCacheEntry RPC method that is used to eject a specific managed key
+ * entry from the cache on region servers.
+ */
+ @Test
+ public void testEjectManagedKeyDataCacheEntry() throws Exception {
+ // Create mocks
+ HRegionServer mockServer = mock(HRegionServer.class);
+ Configuration conf = HBaseConfiguration.create();
+ FileSystem mockFs = mock(FileSystem.class);
+ KeyManagementService mockKeyService = mock(KeyManagementService.class);
+ ManagedKeyDataCache mockCache = mock(ManagedKeyDataCache.class);
+
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ when(mockServer.isOnline()).thenReturn(true);
+ when(mockServer.isAborted()).thenReturn(false);
+ when(mockServer.isStopped()).thenReturn(false);
+ when(mockServer.isDataFileSystemOk()).thenReturn(true);
+ when(mockServer.getFileSystem()).thenReturn(mockFs);
+ when(mockServer.getKeyManagementService()).thenReturn(mockKeyService);
+ when(mockKeyService.getManagedKeyDataCache()).thenReturn(mockCache);
+ // Mock the ejectKey to return true
+ when(mockCache.ejectKey(any(), any(), any())).thenReturn(true);
+
+ // Create RSRpcServices
+ RSRpcServices rpcServices = new RSRpcServices(mockServer);
+
+ // Create request
+ byte[] keyCustodian = Bytes.toBytes("testCustodian");
+ String keyNamespace = "testNamespace";
+ String keyMetadata = "testMetadata";
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash(keyMetadata);
+
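+ // The cache entry is addressed by the hash of the key metadata, not the raw metadata string.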
+ ManagedKeyEntryRequest request = ManagedKeyEntryRequest.newBuilder()
+ .setKeyCustNs(ManagedKeyRequest.newBuilder().setKeyCust(ByteString.copyFrom(keyCustodian))
+ .setKeyNamespace(keyNamespace).build())
+ .setKeyMetadataHash(ByteString.copyFrom(keyMetadataHash)).build();
+
+ RpcController controller = mock(RpcController.class);
+
+ // Call the RPC method
+ BooleanMsg response = rpcServices.ejectManagedKeyDataCacheEntry(controller, request);
+
+ // Verify the response is not null and contains the expected boolean value
+ assertNotNull("Response should not be null", response);
+ assertTrue("Response should indicate key was ejected", response.getBoolMsg());
+
+ // Verify that ejectKey was called on the cache
+ verify(mockCache).ejectKey(keyCustodian, keyNamespace, keyMetadataHash);
+
+ LOG.info("ejectManagedKeyDataCacheEntry test completed successfully");
+ }
+
+ /**
+ * Test that ejectManagedKeyDataCacheEntry throws ServiceException when the server is stopped.
+ */
+ @Test
+ public void testEjectManagedKeyDataCacheEntryWhenServerStopped() throws Exception {
+ // Create mocks
+ HRegionServer mockServer = mock(HRegionServer.class);
+ Configuration conf = HBaseConfiguration.create();
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ when(mockServer.isOnline()).thenReturn(true);
+ when(mockServer.isAborted()).thenReturn(false);
+ when(mockServer.isStopped()).thenReturn(true); // Server is stopped
+ when(mockServer.isDataFileSystemOk()).thenReturn(true);
+ when(mockServer.getFileSystem()).thenReturn(mockFs);
+
+ // Create RSRpcServices
+ RSRpcServices rpcServices = new RSRpcServices(mockServer);
+
+ // Create request
+ byte[] keyCustodian = Bytes.toBytes("testCustodian");
+ String keyNamespace = "testNamespace";
+ String keyMetadata = "testMetadata";
+ byte[] keyMetadataHash = ManagedKeyData.constructMetadataHash(keyMetadata);
+
+ ManagedKeyEntryRequest request = ManagedKeyEntryRequest.newBuilder()
+ .setKeyCustNs(ManagedKeyRequest.newBuilder().setKeyCust(ByteString.copyFrom(keyCustodian))
+ .setKeyNamespace(keyNamespace).build())
+ .setKeyMetadataHash(ByteString.copyFrom(keyMetadataHash)).build();
+
+ RpcController controller = mock(RpcController.class);
+
+ // Call the RPC method and expect ServiceException
+ try {
+ rpcServices.ejectManagedKeyDataCacheEntry(controller, request);
+ fail("Expected ServiceException when server is stopped");
+ } catch (ServiceException e) {
+ // Expected
+ assertTrue("Exception should mention server stopping",
+ e.getCause().getMessage().contains("stopping"));
+ LOG.info("Correctly threw ServiceException when server is stopped");
+ }
+ }
+
+ /**
+ * Test the clearManagedKeyDataCache RPC method that is used to clear all cached entries in the
+ * ManagedKeyDataCache.
+ */
+ @Test
+ public void testClearManagedKeyDataCache() throws Exception {
+ // Create mocks
+ HRegionServer mockServer = mock(HRegionServer.class);
+ Configuration conf = HBaseConfiguration.create();
+ FileSystem mockFs = mock(FileSystem.class);
+ KeyManagementService mockKeyService = mock(KeyManagementService.class);
+ ManagedKeyDataCache mockCache = mock(ManagedKeyDataCache.class);
+
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ when(mockServer.isOnline()).thenReturn(true);
+ when(mockServer.isAborted()).thenReturn(false);
+ when(mockServer.isStopped()).thenReturn(false);
+ when(mockServer.isDataFileSystemOk()).thenReturn(true);
+ when(mockServer.getFileSystem()).thenReturn(mockFs);
+ when(mockServer.getKeyManagementService()).thenReturn(mockKeyService);
+ when(mockKeyService.getManagedKeyDataCache()).thenReturn(mockCache);
+
+ // Create RSRpcServices
+ RSRpcServices rpcServices = new RSRpcServices(mockServer);
+
+ // Create request
+ EmptyMsg request = EmptyMsg.getDefaultInstance();
+ RpcController controller = mock(RpcController.class);
+
+ // Call the RPC method
+ EmptyMsg response = rpcServices.clearManagedKeyDataCache(controller, request);
+
+ // Verify the response is not null
+ assertNotNull("Response should not be null", response);
+
+ // Verify that clearCache was called on the cache
+ verify(mockCache).clearCache();
+
+ LOG.info("clearManagedKeyDataCache test completed successfully");
+ }
+
+ /**
+ * Test that clearManagedKeyDataCache throws ServiceException when the server is stopped.
+ */
+ @Test
+ public void testClearManagedKeyDataCacheWhenServerStopped() throws Exception {
+ // Create mocks
+ HRegionServer mockServer = mock(HRegionServer.class);
+ Configuration conf = HBaseConfiguration.create();
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ when(mockServer.isOnline()).thenReturn(true);
+ when(mockServer.isAborted()).thenReturn(false);
+ when(mockServer.isStopped()).thenReturn(true); // Server is stopped
+ when(mockServer.isDataFileSystemOk()).thenReturn(true);
+ when(mockServer.getFileSystem()).thenReturn(mockFs);
+
+ // Create RSRpcServices
+ RSRpcServices rpcServices = new RSRpcServices(mockServer);
+
+ // Create request
+ EmptyMsg request = EmptyMsg.getDefaultInstance();
+ RpcController controller = mock(RpcController.class);
+
+ // Call the RPC method and expect ServiceException
+ try {
+ rpcServices.clearManagedKeyDataCache(controller, request);
+ fail("Expected ServiceException when server is stopped");
+ } catch (ServiceException e) {
+ // Expected
+ assertTrue("Exception should mention server stopping",
+ e.getCause().getMessage().contains("stopping"));
+ LOG.info("Correctly threw ServiceException when server is stopped");
+ }
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java
index 6b372fa99350..1a4ba7ac99cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java
@@ -125,7 +125,8 @@ public void test() throws Exception {
Path rootDir = TEST_UTIL.getDataTestDir();
Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable());
HRegionFileSystem.createRegionOnFileSystem(CONF, TEST_UTIL.getTestFileSystem(), tableDir, info);
- region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs);
+ region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs,
+ rs.getKeyManagementService());
// create some recovered.edits
final WALFactory wals = new WALFactory(CONF, method);
try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
index ffc4e17f6f8b..29040ad58bec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
@@ -122,7 +122,8 @@ public void testOpenErrorMessageReference() throws IOException {
storeFileTrackerForTest.createReference(r, p);
StoreFileInfo sfi = storeFileTrackerForTest.getStoreFileInfo(p, true);
try {
- ReaderContext context = sfi.createReaderContext(false, 1000, ReaderType.PREAD);
+ ReaderContext context =
+ sfi.createReaderContext(false, 1000, ReaderType.PREAD, null, null, null);
sfi.createReader(context, null);
throw new IllegalStateException();
} catch (FileNotFoundException fnfe) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
index a59b2966b89d..60acb8a6acb4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
@@ -999,4 +999,21 @@ public boolean replicationPeerModificationSwitch(boolean on, boolean drainProced
public boolean isReplicationPeerModificationEnabled() throws IOException {
return admin.isReplicationPeerModificationEnabled();
}
+
+ @Override
+  public void refreshSystemKeyCacheOnServers(List<ServerName> regionServers) throws IOException {
+ admin.refreshSystemKeyCacheOnServers(regionServers);
+ }
+
+ @Override
+  public void ejectManagedKeyDataCacheEntryOnServers(List<ServerName> regionServers,
+ byte[] keyCustodian, String keyNamespace, String keyMetadata) throws IOException {
+ admin.ejectManagedKeyDataCacheEntryOnServers(regionServers, keyCustodian, keyNamespace,
+ keyMetadata);
+ }
+
+ @Override
+  public void clearManagedKeyDataCacheOnServers(List<ServerName> regionServers) throws IOException {
+ admin.clearManagedKeyDataCacheOnServers(regionServers);
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java
new file mode 100644
index 000000000000..e648d8a1c217
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java
@@ -0,0 +1,1105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
+import java.util.Arrays;
+import java.util.Collection;
+import javax.crypto.spec.SecretKeySpec;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.io.crypto.Cipher;
+import org.apache.hadoop.hbase.io.crypto.CipherProvider;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.MockAesKeyProvider;
+import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.testclassification.SecurityTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Suite;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestSecurityUtil.TestBasic.class,
+ TestSecurityUtil.TestCreateEncryptionContext_ForWrites.class,
+ TestSecurityUtil.TestCreateEncryptionContext_ForReads.class,
+ TestSecurityUtil.TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException.class, })
+@Category({ SecurityTests.class, SmallTests.class })
+public class TestSecurityUtil {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSecurityUtil.class);
+
+ // Test constants to eliminate magic strings and improve maintainability
+ protected static final String TEST_NAMESPACE = "test-namespace";
+ protected static final String TEST_FAMILY = "test-family";
+ protected static final String HBASE_KEY = "hbase";
+ protected static final String TEST_KEK_METADATA = "test-kek-metadata";
+ protected static final long TEST_KEK_CHECKSUM = 12345L;
+ protected static final String TEST_KEY_16_BYTE = "test-key-16-byte";
+ protected static final String TEST_DEK_16_BYTE = "test-dek-16-byte";
+ protected static final String INVALID_KEY_DATA = "invalid-key-data";
+ protected static final String INVALID_WRAPPED_KEY_DATA = "invalid-wrapped-key-data";
+ protected static final String INVALID_SYSTEM_KEY_DATA = "invalid-system-key-data";
+ protected static final String UNKNOWN_CIPHER = "UNKNOWN_CIPHER";
+ protected static final String AES_CIPHER = "AES";
+ protected static final String DES_CIPHER = "DES";
+
+ protected Configuration conf;
+ protected HBaseTestingUtil testUtil;
+ protected Path testPath;
+ protected ColumnFamilyDescriptor mockFamily;
+ protected TableDescriptor mockTableDescriptor;
+ protected ManagedKeyDataCache mockManagedKeyDataCache;
+ protected SystemKeyCache mockSystemKeyCache;
+ protected FixedFileTrailer mockTrailer;
+ protected ManagedKeyData mockManagedKeyData;
+ protected Key testKey;
+ protected byte[] testWrappedKey;
+ protected Key kekKey;
+ protected String testTableNamespace;
+
+ /**
+ * Configuration builder for setting up different encryption test scenarios.
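+   * Typical usage, as in the tests below:
+   * {@code configBuilder().withKeyManagement(true).apply(conf)}.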
+ */
+ protected static class TestConfigBuilder {
+ private boolean encryptionEnabled = true;
+ private boolean keyManagementEnabled = false;
+ private boolean localKeyGenEnabled = false;
+ private String cipherProvider = "org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider";
+ private String keyProvider = MockAesKeyProvider.class.getName();
+ private String masterKeyName = HBASE_KEY;
+
+ public TestConfigBuilder withEncryptionEnabled(boolean enabled) {
+ this.encryptionEnabled = enabled;
+ return this;
+ }
+
+ public TestConfigBuilder withKeyManagement(boolean localKeyGen) {
+ this.keyManagementEnabled = true;
+ this.localKeyGenEnabled = localKeyGen;
+ return this;
+ }
+
+ public TestConfigBuilder withNullCipherProvider() {
+ this.cipherProvider = NullCipherProvider.class.getName();
+ return this;
+ }
+
+ public void apply(Configuration conf) {
+ conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, encryptionEnabled);
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, keyProvider);
+ conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, masterKeyName);
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "true");
+ conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, cipherProvider);
+
+ if (keyManagementEnabled) {
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY,
+ localKeyGenEnabled);
+ } else {
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+ }
+ }
+ }
+
+ protected static TestConfigBuilder configBuilder() {
+ return new TestConfigBuilder();
+ }
+
+ protected void setUpEncryptionConfig() {
+ // Set up real encryption configuration using default AES cipher
+ conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, true);
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockAesKeyProvider.class.getName());
+ conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, HBASE_KEY);
+ // Enable key caching
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "true");
+ // Use DefaultCipherProvider for real AES encryption functionality
+ conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY,
+ "org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider");
+ }
+
+ protected void setUpEncryptionConfigWithNullCipher() {
+ configBuilder().withNullCipherProvider().apply(conf);
+ }
+
+ // ==== Mock Setup Helpers ====
+
+ protected void setupManagedKeyDataCache(String namespace, ManagedKeyData keyData) {
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(namespace))).thenReturn(keyData);
+ }
+
+ protected void setupManagedKeyDataCache(String namespace, String globalSpace,
+ ManagedKeyData keyData) {
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(namespace))).thenReturn(null);
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(globalSpace))).thenReturn(keyData);
+ }
+
+ protected void setupTrailerMocks(byte[] keyBytes, String metadata, Long checksum,
+ String namespace) {
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(metadata);
+ if (checksum != null) {
+ when(mockTrailer.getKEKChecksum()).thenReturn(checksum);
+ }
+ when(mockTrailer.getKeyNamespace()).thenReturn(namespace);
+ }
+
+ protected void setupSystemKeyCache(Long checksum, ManagedKeyData keyData) {
+ when(mockSystemKeyCache.getSystemKeyByChecksum(checksum)).thenReturn(keyData);
+ }
+
+ protected void setupSystemKeyCache(ManagedKeyData latestKey) {
+ when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(latestKey);
+ }
+
+ protected void setupManagedKeyDataCacheEntry(String namespace, String metadata, byte[] keyBytes,
+ ManagedKeyData keyData) throws IOException, KeyException {
+ when(mockManagedKeyDataCache.getEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(namespace), eq(metadata), eq(keyBytes))).thenReturn(keyData);
+ }
+
+ // ==== Exception Testing Helpers ====
+
+  protected <T extends Exception> void assertExceptionContains(Class<T> expectedType,
+    String expectedMessage, Runnable testCode) {
+ T exception = assertThrows(expectedType, () -> testCode.run());
+ assertTrue("Exception message should contain: " + expectedMessage,
+ exception.getMessage().contains(expectedMessage));
+ }
+
+  protected void assertEncryptionContextThrowsForWrites(Class<? extends Exception> expectedType,
+ String expectedMessage) {
+ Exception exception = assertThrows(Exception.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, mockTableDescriptor, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+ });
+ assertTrue("Expected exception type: " + expectedType.getName() + ", but got: "
+ + exception.getClass().getName(), expectedType.isInstance(exception));
+ assertTrue("Exception message should contain: " + expectedMessage,
+ exception.getMessage().contains(expectedMessage));
+ }
+
+  protected void assertEncryptionContextThrowsForReads(Class<? extends Exception> expectedType,
+ String expectedMessage) {
+ Exception exception = assertThrows(Exception.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+ assertTrue("Expected exception type: " + expectedType.getName() + ", but got: "
+ + exception.getClass().getName(), expectedType.isInstance(exception));
+ assertTrue("Exception message should contain: " + expectedMessage,
+ exception.getMessage().contains(expectedMessage));
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ conf = HBaseConfiguration.create();
+ testUtil = new HBaseTestingUtil(conf);
+ testPath = testUtil.getDataTestDir("test-file");
+
+ // Setup mocks (only for objects that don't have encryption logic)
+ mockFamily = mock(ColumnFamilyDescriptor.class);
+ mockTableDescriptor = mock(TableDescriptor.class);
+ mockManagedKeyDataCache = mock(ManagedKeyDataCache.class);
+ mockSystemKeyCache = mock(SystemKeyCache.class);
+ mockTrailer = mock(FixedFileTrailer.class);
+ mockManagedKeyData = mock(ManagedKeyData.class);
+
+ // Use a real test key with exactly 16 bytes for AES-128
+ testKey = new SecretKeySpec(TEST_KEY_16_BYTE.getBytes(), AES_CIPHER);
+
+ // Configure mocks
+ when(mockFamily.getEncryptionType()).thenReturn(AES_CIPHER);
+ when(mockFamily.getNameAsString()).thenReturn(TEST_FAMILY);
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(null); // Default to null for fallback
+ // logic
+ when(mockTableDescriptor.getTableName()).thenReturn(TableName.valueOf("test:table"));
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ testTableNamespace = KeyNamespaceUtil.constructKeyNamespace(mockTableDescriptor, mockFamily);
+
+ // Set up default encryption config
+ setUpEncryptionConfig();
+
+ // Create test wrapped key
+ KeyProvider keyProvider = Encryption.getKeyProvider(conf);
+ kekKey = keyProvider.getKey(HBASE_KEY);
+ Key key = keyProvider.getKey(TEST_DEK_16_BYTE);
+ testWrappedKey = EncryptionUtil.wrapKey(conf, null, key, kekKey);
+ }
+
+ private static byte[] createRandomWrappedKey(Configuration conf) throws IOException {
+ Cipher cipher = Encryption.getCipher(conf, "AES");
+ Key key = cipher.getRandomKey();
+ return EncryptionUtil.wrapKey(conf, HBASE_KEY, key);
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ SecurityTests.class, SmallTests.class })
+ public static class TestBasic extends TestSecurityUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestBasic.class);
+
+ @Test
+ public void testGetUserFromPrincipal() {
+ // Test with slash separator
+ assertEquals("user1", SecurityUtil.getUserFromPrincipal("user1/host@REALM"));
+ assertEquals("user2", SecurityUtil.getUserFromPrincipal("user2@REALM"));
+
+ // Test with no realm
+ assertEquals("user3", SecurityUtil.getUserFromPrincipal("user3"));
+
+ // Test with multiple slashes
+ assertEquals("user4", SecurityUtil.getUserFromPrincipal("user4/host1/host2@REALM"));
+ }
+
+ @Test
+ public void testGetPrincipalWithoutRealm() {
+ // Test with realm
+ assertEquals("user1/host", SecurityUtil.getPrincipalWithoutRealm("user1/host@REALM"));
+ assertEquals("user2", SecurityUtil.getPrincipalWithoutRealm("user2@REALM"));
+
+ // Test without realm
+ assertEquals("user3", SecurityUtil.getPrincipalWithoutRealm("user3"));
+ assertEquals("user4/host", SecurityUtil.getPrincipalWithoutRealm("user4/host"));
+ }
+
+ @Test
+ public void testIsKeyManagementEnabled() {
+ Configuration conf = HBaseConfiguration.create();
+
+ // Test default behavior (should be false)
+ assertFalse(SecurityUtil.isKeyManagementEnabled(conf));
+
+ // Test with key management enabled
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ assertTrue(SecurityUtil.isKeyManagementEnabled(conf));
+
+ // Test with key management disabled
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+ assertFalse(SecurityUtil.isKeyManagementEnabled(conf));
+ }
+ }
+
+ // Tests for the first createEncryptionContext method (for ColumnFamilyDescriptor)
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ SecurityTests.class, SmallTests.class })
+ public static class TestCreateEncryptionContext_ForWrites extends TestSecurityUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestCreateEncryptionContext_ForWrites.class);
+
+ @Test
+ public void testWithNoEncryptionOnFamily() throws IOException {
+ when(mockFamily.getEncryptionType()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ assertEquals(Encryption.Context.NONE, result);
+ }
+
+ @Test
+ public void testWithEncryptionDisabled() throws IOException {
+ configBuilder().withEncryptionEnabled(false).apply(conf);
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "encryption feature is disabled");
+ }
+
+ @Test
+ public void testWithKeyManagement_LocalKeyGen() throws IOException {
+ configBuilder().withKeyManagement(true).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ }
+
+ @Test
+ public void testWithKeyManagement_NoActiveKey_NoSystemKeyCache() throws IOException {
+ // Test backwards compatibility: when no active key found and system cache is null, should
+ // throw
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, ManagedKeyData.KEY_SPACE_GLOBAL, null);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ // With null system key cache, should still throw IOException
+ Exception exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, mockTableDescriptor, mockFamily,
+ mockManagedKeyDataCache, null);
+ });
+ assertTrue("Should reference system key cache",
+ exception.getMessage().contains("SystemKeyCache"));
+ }
+
+ @Test
+ public void testWithKeyManagement_NoActiveKey_WithSystemKeyCache() throws IOException {
+ // Test backwards compatibility: when no active key found but system cache available, should
+ // work
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, ManagedKeyData.KEY_SPACE_GLOBAL, null);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Should use system key as KEK and generate random DEK
+ assertEquals(mockManagedKeyData, result.getKEKData());
+ }
+
+ @Test
+ public void testWithKeyManagement_LocalKeyGen_WithUnknownKeyCipher() throws IOException {
+ when(mockFamily.getEncryptionType()).thenReturn(UNKNOWN_CIPHER);
+ Key unknownKey = mock(Key.class);
+ when(unknownKey.getAlgorithm()).thenReturn(UNKNOWN_CIPHER);
+ when(mockManagedKeyData.getTheKey()).thenReturn(unknownKey);
+
+ configBuilder().withKeyManagement(true).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ assertEncryptionContextThrowsForWrites(RuntimeException.class,
+ "Cipher 'UNKNOWN_CIPHER' is not");
+ }
+
+ @Test
+ public void testWithKeyManagement_LocalKeyGen_WithKeyAlgorithmMismatch() throws IOException {
+ Key desKey = mock(Key.class);
+ when(desKey.getAlgorithm()).thenReturn(DES_CIPHER);
+ when(mockManagedKeyData.getTheKey()).thenReturn(desKey);
+
+ configBuilder().withKeyManagement(true).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "Encryption for family 'test-family' configured with type 'AES' but key specifies "
+ + "algorithm 'DES'");
+ }
+
+ @Test
+ public void testWithKeyManagement_UseSystemKeyWithNSSpecificActiveKey() throws IOException {
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ setupSystemKeyCache(mockManagedKeyData);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ }
+
+ @Test
+ public void testWithKeyManagement_UseSystemKeyWithoutNSSpecificActiveKey() throws IOException {
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, ManagedKeyData.KEY_SPACE_GLOBAL,
+ mockManagedKeyData);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ }
+
+ @Test
+ public void testWithoutKeyManagement_WithFamilyProvidedKey() throws Exception {
+ byte[] wrappedKey = createRandomWrappedKey(conf);
+ when(mockFamily.getEncryptionKey()).thenReturn(wrappedKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result, false);
+ }
+
+ @Test
+ public void testWithoutKeyManagement_KeyAlgorithmMismatch() throws Exception {
+ // Create a key with different algorithm and wrap it
+ Key differentKey = new SecretKeySpec(TEST_KEY_16_BYTE.getBytes(), DES_CIPHER);
+ byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, HBASE_KEY, differentKey);
+ when(mockFamily.getEncryptionKey()).thenReturn(wrappedDESKey);
+
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "Encryption for family 'test-family' configured with type 'AES' but key specifies "
+ + "algorithm 'DES'");
+ }
+
+ @Test
+ public void testWithUnavailableCipher() throws IOException {
+ when(mockFamily.getEncryptionType()).thenReturn(UNKNOWN_CIPHER);
+ setUpEncryptionConfigWithNullCipher();
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "Cipher 'UNKNOWN_CIPHER' is not available");
+ }
+
+ // ---- New backwards compatibility test scenarios ----
+
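+    // Scenario map (summarized from the individual test comments below):
+    //   1 : family-provided key          -> family key as DEK, latest STK as KEK
+    //   2a: active key, no local key gen -> active key as DEK, latest STK as KEK
+    //   2b: active key + local key gen   -> random DEK, active key as KEK
+    //   3 : no active key                -> random DEK, latest STK as KEK
+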
+ @Test
+ public void testBackwardsCompatibility_Scenario1_FamilyKeyWithKeyManagement()
+ throws IOException {
+ // Scenario 1: Family has encryption key -> use as DEK, latest STK as KEK
+ when(mockFamily.getEncryptionKey()).thenReturn(testWrappedKey);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that system key is used as KEK
+ assertEquals(mockManagedKeyData, result.getKEKData());
+ }
+
+ @Test
+ public void testBackwardsCompatibility_Scenario2a_ActiveKeyAsDeK() throws IOException {
+ // Scenario 2a: Active key exists, local key gen disabled -> use active key as DEK, latest STK
+ // as KEK
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ ManagedKeyData mockSystemKey = mock(ManagedKeyData.class);
+ when(mockSystemKey.getTheKey()).thenReturn(kekKey);
+ setupSystemKeyCache(mockSystemKey);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that active key is used as DEK and system key as KEK
+ assertEquals(testKey, result.getKey()); // Active key should be the DEK
+ assertEquals(mockSystemKey, result.getKEKData()); // System key should be the KEK
+ }
+
+ @Test
+ public void testBackwardsCompatibility_Scenario2b_ActiveKeyAsKekWithLocalKeyGen()
+ throws IOException {
+ // Scenario 2b: Active key exists, local key gen enabled -> use active key as KEK, generate
+ // random DEK
+ configBuilder().withKeyManagement(true).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that active key is used as KEK and a generated key as DEK
+ assertNotNull("DEK should be generated", result.getKey());
+ assertEquals(mockManagedKeyData, result.getKEKData()); // Active key should be the KEK
+ }
+
+ @Test
+ public void testBackwardsCompatibility_Scenario3a_NoActiveKeyGenerateLocalKey()
+ throws IOException {
+ // Scenario 3: No active key -> generate random DEK, latest STK as KEK
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(TEST_NAMESPACE, ManagedKeyData.KEY_SPACE_GLOBAL, null); // No active
+ // key
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that a random key is generated as DEK and system key as KEK
+ assertNotNull("DEK should be generated", result.getKey());
+ assertEquals(mockManagedKeyData, result.getKEKData()); // System key should be the KEK
+ }
+
+ @Test
+ public void testWithoutKeyManagement_Scenario3b_WithRandomKeyGeneration() throws IOException {
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result, false);
+      // Here a system key is used with local key generation, so no namespace is set.
+ assertNull(result.getKeyNamespace());
+ }
+
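+    // Key namespace fallback order exercised by the four tests below:
+    //   Rule 1: column family KEY_NAMESPACE attribute
+    //   Rule 2: namespace constructed from the table descriptor and family
+    //   Rule 3: table name
+    //   Rule 4: global namespace (KEY_SPACE_GLOBAL)
+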
+ @Test
+ public void testFallbackRule1_CFKeyNamespaceAttribute() throws IOException {
+ // Test Rule 1: Column family has KEY_NAMESPACE attribute
+ String cfKeyNamespace = "cf-specific-namespace";
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(cfKeyNamespace);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ // Mock managed key data cache to return active key only for CF namespace
+ setupManagedKeyDataCache(cfKeyNamespace, mockManagedKeyData);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that CF-specific namespace was used
+ assertEquals(cfKeyNamespace, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testFallbackRule2_ConstructedNamespace() throws IOException {
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(null); // No CF namespace
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupSystemKeyCache(mockManagedKeyData);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that constructed namespace was used
+ assertEquals(testTableNamespace, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testFallbackRule3_TableNameAsNamespace() throws IOException {
+ // Test Rule 3: Use table name as namespace when CF namespace and constructed namespace fail
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(null); // No CF namespace
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ String tableName = "test:table";
+ when(mockTableDescriptor.getTableName()).thenReturn(TableName.valueOf(tableName));
+
+ // Mock cache to fail for CF and constructed namespace, succeed for table name
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(testTableNamespace))).thenReturn(null); // Constructed namespace fails
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(tableName))).thenReturn(mockManagedKeyData); // Table name succeeds
+
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that table name was used as namespace
+ assertEquals(tableName, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testFallbackRule4_GlobalNamespace() throws IOException {
+ // Test Rule 4: Fall back to global namespace when all other rules fail
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(null); // No CF namespace
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ String tableName = "test:table";
+ when(mockTableDescriptor.getTableName()).thenReturn(TableName.valueOf(tableName));
+
+ // Mock cache to fail for all specific namespaces, succeed only for global
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(testTableNamespace))).thenReturn(null); // Constructed namespace fails
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(tableName))).thenReturn(null); // Table name fails
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(ManagedKeyData.KEY_SPACE_GLOBAL))).thenReturn(mockManagedKeyData); // Global succeeds
+
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that global namespace was used
+ assertEquals(ManagedKeyData.KEY_SPACE_GLOBAL, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testFallbackRuleOrder() throws IOException {
+ // Test that the rules are tried in the correct order
+ String cfKeyNamespace = "cf-namespace";
+ String tableName = "test:table";
+
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(cfKeyNamespace);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ when(mockTableDescriptor.getTableName()).thenReturn(TableName.valueOf(tableName));
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ // Set up mocks so that CF namespace fails but table name would succeed
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(cfKeyNamespace))).thenReturn(null); // CF namespace fails
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(testTableNamespace))).thenReturn(mockManagedKeyData); // Constructed namespace succeeds
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(tableName))).thenReturn(mockManagedKeyData); // Table name would also succeed
+
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that constructed namespace was used (Rule 2), not table name (Rule 3)
+ assertEquals(testTableNamespace, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testBackwardsCompatibility_Scenario1_FamilyKeyWithoutKeyManagement()
+ throws IOException {
+ // Scenario 1 variation: Family has encryption key but key management disabled -> use as DEK,
+ // no KEK
+ byte[] wrappedKey = createRandomWrappedKey(conf);
+ when(mockFamily.getEncryptionKey()).thenReturn(wrappedKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result, false); // No key management, so no KEK data
+ }
+
+ @Test
+ public void testWithKeyManagement_FamilyKey_UnwrapKeyException() throws Exception {
+ // Test for KeyException->IOException wrapping when family has key bytes with key management
+ // enabled
+ // This covers the exception block at lines 103-105 in SecurityUtil.java
+
+      // Wrap the key with a mismatched KEK so that unwrapping with the system KEK fails
+ Key wrongKek = new SecretKeySpec("bad-kek-16-bytes".getBytes(), AES_CIPHER); // Exactly 16
+ // bytes
+ byte[] validWrappedKey = EncryptionUtil.wrapKey(conf, null, testKey, wrongKek);
+
+ when(mockFamily.getEncryptionKey()).thenReturn(validWrappedKey);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey); // Different KEK for unwrapping
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, mockTableDescriptor, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+ });
+
+ // The IOException should wrap a KeyException from the unwrapping process
+ assertNotNull("Exception should have a cause", exception.getCause());
+ assertTrue("Exception cause should be a KeyException",
+ exception.getCause() instanceof KeyException);
+ }
+
+ // Tests for the second createEncryptionContext method (for reading files)
+
+ @Test
+ public void testWithNoKeyMaterial() throws IOException {
+ when(mockTrailer.getEncryptionKey()).thenReturn(null);
+ when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ assertEquals(Encryption.Context.NONE, result);
+ }
+ }
+
+ // Tests for the second createEncryptionContext method (for reading files)
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ SecurityTests.class, SmallTests.class })
+ public static class TestCreateEncryptionContext_ForReads extends TestSecurityUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestCreateEncryptionContext_ForReads.class);
+
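+    // KEK resolution order on the read path, as exercised below: first an STK lookup by
+    // checksum, then a managed key lookup by KEK metadata, finally the latest system key.
+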
+ @Test
+ public void testWithKEKMetadata_STKLookupFirstThenManagedKey() throws Exception {
+ // Test new logic: STK lookup happens first, then metadata lookup if STK fails
+ // Set up scenario where both checksum and metadata are available
+ setupTrailerMocks(testWrappedKey, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, null);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ // STK lookup should succeed and be used (first priority)
+ ManagedKeyData stkKeyData = mock(ManagedKeyData.class);
+ when(stkKeyData.getTheKey()).thenReturn(kekKey);
+ setupSystemKeyCache(TEST_KEK_CHECKSUM, stkKeyData);
+
+ // Also set up managed key cache (but it shouldn't be used since STK succeeds)
+ setupManagedKeyDataCacheEntry(testTableNamespace, TEST_KEK_METADATA, testWrappedKey,
+ mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey())
+ .thenThrow(new RuntimeException("This should not be called"));
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Should use STK data, not managed key data
+ assertEquals(stkKeyData, result.getKEKData());
+ }
+
+ @Test
+ public void testWithKEKMetadata_STKFailsThenManagedKeySucceeds() throws Exception {
+ // Test fallback: STK lookup fails, metadata lookup succeeds
+ setupTrailerMocks(testWrappedKey, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, testTableNamespace);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ // STK lookup should fail (returns null)
+ when(mockSystemKeyCache.getSystemKeyByChecksum(TEST_KEK_CHECKSUM)).thenReturn(null);
+
+ // Managed key lookup should succeed
+ setupManagedKeyDataCacheEntry(testTableNamespace, TEST_KEK_METADATA, testWrappedKey,
+ mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Should use managed key data since STK failed
+ assertEquals(mockManagedKeyData, result.getKEKData());
+ }
+
+ @Test
+ public void testWithKeyManagement_KEKMetadataAndChecksumFailure()
+ throws IOException, KeyException {
+ // Test scenario where both STK lookup and managed key lookup fail
+ byte[] keyBytes = "test-encrypted-key".getBytes();
+ String kekMetadata = "test-kek-metadata";
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(kekMetadata);
+ when(mockTrailer.getKEKChecksum()).thenReturn(TEST_KEK_CHECKSUM);
+ when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+
+ // STK lookup should fail
+ when(mockSystemKeyCache.getSystemKeyByChecksum(TEST_KEK_CHECKSUM)).thenReturn(null);
+
+ // Managed key lookup should also fail
+ when(mockManagedKeyDataCache.getEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq("test-namespace"), eq(kekMetadata), eq(keyBytes)))
+ .thenThrow(new IOException("Key not found"));
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(
+ exception.getMessage().contains("Failed to get key data for KEK metadata: " + kekMetadata));
+ assertTrue(exception.getCause().getMessage().contains("Key not found"));
+ }
+
+ @Test
+ public void testWithKeyManagement_UseSystemKey() throws IOException {
+ // Test STK lookup by checksum (first priority in new logic)
+ setupTrailerMocks(testWrappedKey, null, TEST_KEK_CHECKSUM, null);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupSystemKeyCache(TEST_KEK_CHECKSUM, mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ assertEquals(mockManagedKeyData, result.getKEKData());
+ }
+
+ @Test
+ public void testBackwardsCompatibility_WithKeyManagement_LatestSystemKeyNotFound()
+ throws IOException {
+ // Test when both STK lookup by checksum fails and latest system key is null
+ byte[] keyBytes = "test-encrypted-key".getBytes();
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+
+ // Enable key management
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+
+ // Both checksum lookup and latest system key lookup should fail
+ when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(null);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("Failed to get latest system key"));
+ }
+
+ @Test
+ public void testBackwardsCompatibility_FallbackToLatestSystemKey() throws IOException {
+ // Test fallback to latest system key when both checksum and metadata are unavailable
+ setupTrailerMocks(testWrappedKey, null, 0L, TEST_NAMESPACE); // No checksum, no metadata
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ ManagedKeyData latestSystemKey = mock(ManagedKeyData.class);
+ when(latestSystemKey.getTheKey()).thenReturn(kekKey);
+ when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(latestSystemKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ assertEquals(latestSystemKey, result.getKEKData());
+ }
+
+ @Test
+    public void testWithoutKeyManagementEnabled() throws IOException {
+ byte[] wrappedKey = createRandomWrappedKey(conf);
+ when(mockTrailer.getEncryptionKey()).thenReturn(wrappedKey);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result, false);
+ }
+
+ @Test
+ public void testKeyManagementBackwardsCompatibility() throws Exception {
+ when(mockTrailer.getEncryptionKey()).thenReturn(testWrappedKey);
+ when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result, true);
+ }
+
+ @Test
+ public void testWithoutKeyManagement_UnwrapFailure() throws IOException {
+ byte[] invalidKeyBytes = INVALID_KEY_DATA.getBytes();
+ when(mockTrailer.getEncryptionKey()).thenReturn(invalidKeyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+
+ Exception exception = assertThrows(Exception.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ // The exception should indicate that unwrapping failed - could be IOException or
+ // RuntimeException
+ assertNotNull(exception);
+ }
+
+ @Test
+ public void testCreateEncryptionContext_WithoutKeyManagement_UnavailableCipher()
+ throws Exception {
+ // Create a DES key and wrap it first with working configuration
+ Key desKey = new SecretKeySpec("test-key-16-byte".getBytes(), "DES");
+ byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, HBASE_KEY, desKey);
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(wrappedDESKey);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+
+ // Disable key management and use null cipher provider
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+ setUpEncryptionConfigWithNullCipher();
+
+ RuntimeException exception = assertThrows(RuntimeException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("Cipher 'AES' not available"));
+ }
+
+ @Test
+ public void testCreateEncryptionContext_WithKeyManagement_NullKeyManagementCache()
+ throws IOException {
+ byte[] keyBytes = "test-encrypted-key".getBytes();
+ String kekMetadata = "test-kek-metadata";
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(kekMetadata);
+ when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+
+ // Enable key management
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, null, mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("ManagedKeyDataCache is null"));
+ }
+
+ @Test
+ public void testCreateEncryptionContext_WithKeyManagement_NullSystemKeyCache()
+ throws IOException {
+ byte[] keyBytes = "test-encrypted-key".getBytes();
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+ when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+
+ // Enable key management
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ null);
+ });
+
+ assertTrue(exception.getMessage()
+ .contains("SystemKeyCache can't be null when using key management feature"));
+ }
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ SecurityTests.class, SmallTests.class })
+ public static class TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException
+ extends TestSecurityUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule
+ .forClass(TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException.class);
+
+ @Parameter(0)
+ public boolean isKeyException;
+
+ @Parameterized.Parameters(name = "{index},isKeyException={0}")
+    public static Collection<Object[]> data() {
+ return Arrays.asList(new Object[][] { { true }, { false }, });
+ }
+
+ @Test
+ public void testWithDEK() throws IOException, KeyException {
+ byte[] wrappedKey = createRandomWrappedKey(conf);
+ MockAesKeyProvider keyProvider = (MockAesKeyProvider) Encryption.getKeyProvider(conf);
+      keyProvider.clearKeys(); // Let a new key be instantiated and cause an unwrap failure.
+
+ setupTrailerMocks(wrappedKey, null, 0L, null);
+ setupManagedKeyDataCacheEntry(TEST_NAMESPACE, TEST_KEK_METADATA, wrappedKey,
+ mockManagedKeyData);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("Key was not successfully unwrapped"));
+ // The root cause should be some kind of parsing/unwrapping exception
+ assertNotNull(exception.getCause());
+ }
+
+ @Test
+ public void testWithSystemKey() throws IOException {
+ // Use invalid key bytes to trigger unwrapping failure
+ byte[] invalidKeyBytes = INVALID_SYSTEM_KEY_DATA.getBytes();
+
+ setupTrailerMocks(invalidKeyBytes, null, TEST_KEK_CHECKSUM, null);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupSystemKeyCache(TEST_KEK_CHECKSUM, mockManagedKeyData);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains(
+ "Failed to unwrap key with KEK checksum: " + TEST_KEK_CHECKSUM + ", metadata: null"));
+ // The root cause should be some kind of parsing/unwrapping exception
+ assertNotNull(exception.getCause());
+ }
+ }
+
+ protected void verifyContext(Encryption.Context context) {
+ verifyContext(context, true);
+ }
+
+ protected void verifyContext(Encryption.Context context, boolean withKeyManagement) {
+ assertNotNull(context);
+ assertNotNull("Context should have a cipher", context.getCipher());
+ assertNotNull("Context should have a key", context.getKey());
+ if (withKeyManagement) {
+ assertNotNull("Context should have KEK data when key management is enabled",
+ context.getKEKData());
+ } else {
+ assertNull("Context should not have KEK data when key management is disabled",
+ context.getKEKData());
+ }
+ }
+
+ /**
+ * Null cipher provider for testing error cases.
+ */
+ public static class NullCipherProvider implements CipherProvider {
+ private Configuration conf;
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+
+ @Override
+ public String getName() {
+ return "null";
+ }
+
+ @Override
+ public String[] getSupportedCiphers() {
+ return new String[0];
+ }
+
+ @Override
+ public Cipher getCipher(String name) {
+ return null; // Always return null to simulate unavailable cipher
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 58ffdcf91d43..273385ec9c84 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -55,6 +55,7 @@
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.ipc.SimpleRpcServer;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.security.SecurityInfo;
@@ -359,6 +360,11 @@ public Connection createConnection(Configuration conf) throws IOException {
public AsyncClusterConnection getAsyncClusterConnection() {
return null;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
@Parameters(name = "{index}: rpcServerImpl={0}")
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index 7b2749177889..a0246fee2955 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -178,7 +178,7 @@ public void testSkipReplayAndUpdateSeqId() throws Exception {
for (RegionInfo restoredRegion : restoredRegions) {
// open restored region
HRegion region = HRegion.newHRegion(CommonFSUtils.getTableDir(restoreDir, tableName), null,
- fs, conf, restoredRegion, htd, null);
+ fs, conf, restoredRegion, htd, null, null);
// set restore flag
region.setRestoredRegion(true);
region.initialize();
@@ -188,7 +188,7 @@ public void testSkipReplayAndUpdateSeqId() throws Exception {
// open restored region without set restored flag
HRegion region2 = HRegion.newHRegion(CommonFSUtils.getTableDir(restoreDir, tableName), null,
- fs, conf, restoredRegion, htd, null);
+ fs, conf, restoredRegion, htd, null, null);
region2.initialize();
long maxSeqId2 = WALSplitUtil.getMaxRegionSequenceId(fs, recoveredEdit);
Assert.assertTrue(maxSeqId2 > maxSeqId);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 90f4a7555b93..77b6ceffe7ca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.slf4j.Logger;
@@ -119,4 +120,9 @@ public Connection createConnection(Configuration conf) throws IOException {
public AsyncClusterConnection getAsyncClusterConnection() {
throw new UnsupportedOperationException();
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java
index f0cc2febd6e8..7b67b838659b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.util;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
@@ -30,7 +31,9 @@
import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider;
import org.apache.hadoop.hbase.io.crypto.MockAesKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
@@ -130,6 +133,71 @@ public void testTestEnabledWhenCryptoIsExplicitlyDisabled() throws Exception {
EncryptionTest.testEncryption(conf, algorithm, null);
}
+ // Utility methods for configuration setup
+ private Configuration createManagedKeyProviderConfig() {
+ Configuration conf = HBaseConfiguration.create();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
+ return conf;
+ }
+
+ @Test
+ public void testManagedKeyProvider() throws Exception {
+ Configuration conf = createManagedKeyProviderConfig();
+ EncryptionTest.testKeyProvider(conf);
+ assertTrue("Managed provider should be cached", EncryptionTest.keyProviderResults
+ .containsKey(conf.get(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY)));
+ }
+
+ @Test(expected = IOException.class)
+ public void testBadManagedKeyProvider() throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ FailingManagedKeyProvider.class.getName());
+ EncryptionTest.testKeyProvider(conf);
+ fail("Instantiation of bad managed key provider should have failed check");
+ }
+
+ @Test
+ public void testEncryptionWithManagedKeyProvider() throws Exception {
+ Configuration conf = createManagedKeyProviderConfig();
+ String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
+ EncryptionTest.testEncryption(conf, algorithm, null);
+ assertTrue("Managed provider should be cached", EncryptionTest.keyProviderResults
+ .containsKey(conf.get(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY)));
+ }
+
+ @Test(expected = IOException.class)
+ public void testUnknownCipherWithManagedKeyProvider() throws Exception {
+ Configuration conf = createManagedKeyProviderConfig();
+ EncryptionTest.testEncryption(conf, "foobar", null);
+ fail("Test for bogus cipher should have failed with managed key provider");
+ }
+
+ @Test(expected = IOException.class)
+ public void testManagedKeyProviderWhenCryptoIsExplicitlyDisabled() throws Exception {
+ Configuration conf = createManagedKeyProviderConfig();
+ String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
+ conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, false);
+ EncryptionTest.testEncryption(conf, algorithm, null);
+ assertTrue("Managed provider should be cached", EncryptionTest.keyProviderResults
+ .containsKey(conf.get(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY)));
+ }
+
+ @Test(expected = IOException.class)
+ public void testManagedKeyProviderWithKeyManagementDisabled() throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+ // This should cause issues since we're trying to use managed provider without enabling key
+ // management
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, ManagedKeyStoreKeyProvider.class.getName());
+
+ EncryptionTest.testKeyProvider(conf);
+ fail("Should have failed when using managed provider with key management disabled");
+ }
+
public static class FailingKeyProvider implements KeyProvider {
@Override
@@ -181,4 +249,12 @@ public Cipher getCipher(String name) {
}
}
+
+ // Helper class for testing failing managed key provider
+ public static class FailingManagedKeyProvider extends MockManagedKeyProvider {
+ @Override
+ public void initConfig(Configuration conf, String params) {
+ throw new RuntimeException("BAD MANAGED PROVIDER!");
+ }
+ }
}
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index fb699554fc06..d595b2dd219d 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -41,6 +41,12 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-common</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-annotations</artifactId>
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 93cc312338c9..2b1d29e7849e 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1223,6 +1223,10 @@ def cfd(arg, tdb)
cfdb.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key,
algorithm))
end
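+ # Propagate the optional ENCRYPTION_KEY_NAMESPACE attribute to the column family descriptor.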
+ if arg.include?(ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY_NAMESPACE)
+ cfdb.setEncryptionKeyNamespace(arg.delete(
+ ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY_NAMESPACE))
+ end
end
if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT)
compression = arg.delete(ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT).upcase.to_sym
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index a9b35ed1de21..a7e531806cfe 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
#
#
# Licensed to the Apache Software Foundation (ASF) under one
@@ -29,6 +31,7 @@
require 'hbase/visibility_labels'
module Hbase
+ # Main HBase class for connection and admin operations
class Hbase
attr_accessor :configuration
@@ -45,18 +48,21 @@ def initialize(config = nil)
end
def connection
- if @connection.nil?
- @connection = ConnectionFactory.createConnection(configuration)
- end
+ @connection = ConnectionFactory.createConnection(configuration) if @connection.nil?
@connection
end
+
# Returns ruby's Admin class from admin.rb
def admin
- ::Hbase::Admin.new(self.connection)
+ ::Hbase::Admin.new(connection)
end
def rsgroup_admin
- ::Hbase::RSGroupAdmin.new(self.connection)
+ ::Hbase::RSGroupAdmin.new(connection)
+ end
+
+ def keymeta_admin
+ ::Hbase::KeymetaAdmin.new(connection)
end
def taskmonitor
@@ -65,7 +71,7 @@ def taskmonitor
# Create new one each time
def table(table, shell)
- ::Hbase::Table.new(self.connection.getTable(TableName.valueOf(table)), shell)
+ ::Hbase::Table.new(connection.getTable(TableName.valueOf(table)), shell)
end
def replication_admin
@@ -73,21 +79,19 @@ def replication_admin
end
def security_admin
- ::Hbase::SecurityAdmin.new(self.connection.getAdmin)
+ ::Hbase::SecurityAdmin.new(connection.getAdmin)
end
def visibility_labels_admin
- ::Hbase::VisibilityLabelsAdmin.new(self.connection.getAdmin)
+ ::Hbase::VisibilityLabelsAdmin.new(connection.getAdmin)
end
def quotas_admin
- ::Hbase::QuotasAdmin.new(self.connection.getAdmin)
+ ::Hbase::QuotasAdmin.new(connection.getAdmin)
end
def shutdown
- if @connection != nil
- @connection.close
- end
+ @connection&.close
end
end
end
diff --git a/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb b/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb
new file mode 100644
index 000000000000..12cd5445b066
--- /dev/null
+++ b/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb
@@ -0,0 +1,95 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'java'
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyData
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider
+java_import org.apache.hadoop.hbase.keymeta.KeymetaAdminClient
+
+module Hbase
+ # KeymetaAdmin provides a Ruby interface to the HBase Key Management API.
+ class KeymetaAdmin
+ def initialize(connection)
+ @connection = connection
+ @admin = KeymetaAdminClient.new(connection)
+ @hb_admin = @connection.getAdmin
+ end
+
+ def close
+ @admin.close
+ end
+
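+ # The key_info argument for the methods below is a 'cust[:namespace]' string with the
+ # custodian Base64 encoded; the namespace defaults to the global namespace (*).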
+ def enable_key_management(key_info)
+ cust, namespace = extract_cust_info(key_info)
+ @admin.enableKeyManagement(cust, namespace)
+ end
+
+ def get_key_statuses(key_info)
+ cust, namespace = extract_cust_info(key_info)
+ @admin.getManagedKeys(cust, namespace)
+ end
+
+ def disable_key_management(key_info)
+ cust, namespace = extract_cust_info(key_info)
+ @admin.disableKeyManagement(cust, namespace)
+ end
+
+ def disable_managed_key(key_info, key_metadata_hash_base64)
+ cust, namespace = extract_cust_info(key_info)
+ key_metadata_hash_bytes = decode_to_bytes(key_metadata_hash_base64)
+ @admin.disableManagedKey(cust, namespace, key_metadata_hash_bytes)
+ end
+
+ def rotate_managed_key(key_info)
+ cust, namespace = extract_cust_info(key_info)
+ @admin.rotateManagedKey(cust, namespace)
+ end
+
+ def refresh_managed_keys(key_info)
+ cust, namespace = extract_cust_info(key_info)
+ @admin.refreshManagedKeys(cust, namespace)
+ end
+
+ def rotate_stk
+ @admin.rotateSTK
+ end
+
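+ # Splits a 'cust[:namespace]' string into decoded custodian bytes and a namespace,
+ # defaulting the namespace to the global namespace (*).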
+ def extract_cust_info(key_info)
+ cust_info = key_info.split(':')
+ raise(ArgumentError, 'Invalid cust:namespace format') unless [1, 2].include?(cust_info.length)
+
+ custodian = cust_info[0]
+ namespace = cust_info.length > 1 ? cust_info[1] : ManagedKeyData::KEY_SPACE_GLOBAL
+ cust_bytes = decode_to_bytes custodian
+
+ [cust_bytes, namespace]
+ end
+
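+ # Decodes a Base64 string, surfacing decode failures as ArgumentError for friendlier
+ # shell error messages.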
+ def decode_to_bytes(base64_string)
+ ManagedKeyProvider.decodeToBytes(base64_string)
+ rescue Java::JavaIo::IOException => e
+ message = e.cause&.message || e.message
+ raise(ArgumentError, "Failed to decode Base64 encoded string '#{base64_string}': #{message}")
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb
index d4df1f8f5821..67892e5538c0 100644
--- a/hbase-shell/src/main/ruby/hbase_constants.rb
+++ b/hbase-shell/src/main/ruby/hbase_constants.rb
@@ -138,3 +138,4 @@ def self.promote_constants(constants)
require 'hbase/security'
require 'hbase/visibility_labels'
require 'hbase/rsgroup_admin'
+require 'hbase/keymeta_admin'
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 6be3854b8a57..665fa4d06bbd 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -150,6 +150,10 @@ def hbase_rsgroup_admin
@rsgroup_admin ||= hbase.rsgroup_admin
end
+ def hbase_keymeta_admin
+ @hbase_keymeta_admin ||= hbase.keymeta_admin
+ end
+
##
# Create singleton methods on the target receiver object for all the loaded commands
#
@@ -616,6 +620,23 @@ def self.exception_handler(hide_traceback)
]
)
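+# NOTE: the keymeta commands below require the KeymetaServiceEndpoint coprocessor on the
+# Master; it can be registered, for example, via hbase.coprocessor.master.classes.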
+Shell.load_command_group(
+ 'keymeta',
+ full_name: 'Keymeta',
+ comment: "NOTE: The KeyMeta Coprocessor Endpoint must be enabled on the Master else commands fail
+ with: UnknownProtocolException: No registered Master Coprocessor Endpoint found for
+ ManagedKeysService",
+ commands: %w[
+ enable_key_management
+ show_key_status
+ rotate_stk
+ disable_key_management
+ disable_managed_key
+ refresh_managed_keys
+ rotate_managed_key
+ ]
+)
+
Shell.load_command_group(
'rsgroup',
full_name: 'RSGroups',
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index a40f737e7908..a97dddc4e6a0 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -105,6 +105,10 @@ def rsgroup_admin
@shell.hbase_rsgroup_admin
end
+ def keymeta_admin
+ @shell.hbase_keymeta_admin
+ end
+
#----------------------------------------------------------------------
# Creates formatter instance first time and then reuses it.
def formatter
diff --git a/hbase-shell/src/main/ruby/shell/commands/disable_key_management.rb b/hbase-shell/src/main/ruby/shell/commands/disable_key_management.rb
new file mode 100644
index 000000000000..ead9dce96e8f
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/disable_key_management.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # DisableKeyManagement is a class that provides a Ruby interface to disable key management via
+ # HBase Key Management API.
+ class DisableKeyManagement < KeymetaCommandBase
+ def help
+ <<-EOF
+Disable key management for a given cust:namespace (cust in Base64 format).
+If no namespace is specified, the global namespace (*) is used.
+
+Example:
+ hbase> disable_key_management 'cust:namespace'
+ hbase> disable_key_management 'cust'
+ EOF
+ end
+
+ def command(key_info)
+ statuses = [keymeta_admin.disable_key_management(key_info)]
+ print_key_statuses(statuses)
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/disable_managed_key.rb b/hbase-shell/src/main/ruby/shell/commands/disable_managed_key.rb
new file mode 100644
index 000000000000..4384c0f3c825
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/disable_managed_key.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # DisableManagedKey is a class that provides a Ruby interface to disable a managed key via
+ # HBase Key Management API.
+ class DisableManagedKey < KeymetaCommandBase
+ def help
+ <<-EOF
+Disable a managed key for a given cust:namespace (cust Base64 encoded) and key metadata hash
+(Base64 encoded). If no namespace is specified, the global namespace (*) is used.
+
+Example:
+ hbase> disable_managed_key 'cust:namespace', 'key_metadata_hash_base64'
+ hbase> disable_managed_key 'cust', 'key_metadata_hash_base64'
+ EOF
+ end
+
+ def command(key_info, key_metadata_hash_base64)
+ statuses = [keymeta_admin.disable_managed_key(key_info, key_metadata_hash_base64)]
+ print_key_statuses(statuses)
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb b/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb
new file mode 100644
index 000000000000..d594fa024b68
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # EnableKeyManagement is a class that provides a Ruby interface to enable key management via
+ # HBase Key Management API.
+ class EnableKeyManagement < KeymetaCommandBase
+ def help
+ <<-EOF
+Enable key management for a given cust:namespace (cust Base64 encoded).
+If no namespace is specified, the global namespace (*) is used.
+
+Example:
+ hbase> enable_key_management 'cust:namespace'
+ hbase> enable_key_management 'cust'
+ EOF
+ end
+
+ def command(key_info)
+ statuses = [keymeta_admin.enable_key_management(key_info)]
+ print_key_statuses(statuses)
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb
new file mode 100644
index 000000000000..98a57766831a
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+module Shell
+ module Commands
+ # KeymetaCommandBase is a base class for all key management commands.
+ class KeymetaCommandBase < Command
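+ # Renders ManagedKeyData statuses as a table via the shell formatter, one row per key.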
+ def print_key_statuses(statuses)
+ formatter.header(%w[ENCODED-KEY NAMESPACE STATUS METADATA METADATA-HASH REFRESH-TIMESTAMP])
+ statuses.each { |status| formatter.row(format_status_row(status)) }
+ formatter.footer(statuses.size)
+ end
+
+ private
+
+ def format_status_row(status)
+ [
+ status.getKeyCustodianEncoded,
+ status.getKeyNamespace,
+ status.getKeyState.toString,
+ status.getKeyMetadata,
+ status.getKeyMetadataHashEncoded,
+ status.getRefreshTimestamp
+ ]
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/refresh_managed_keys.rb b/hbase-shell/src/main/ruby/shell/commands/refresh_managed_keys.rb
new file mode 100644
index 000000000000..f4c462ceee19
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/refresh_managed_keys.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # RefreshManagedKeys is a class that provides a Ruby interface to refresh managed keys via
+ # HBase Key Management API.
+ class RefreshManagedKeys < KeymetaCommandBase
+ def help
+ <<-EOF
+Refresh all managed keys for a given cust:namespace (cust Base64 encoded).
+If no namespace is specified, the global namespace (*) is used.
+
+Example:
+ hbase> refresh_managed_keys 'cust:namespace'
+ hbase> refresh_managed_keys 'cust'
+ EOF
+ end
+
+ def command(key_info)
+ keymeta_admin.refresh_managed_keys(key_info)
+ puts "Managed keys refreshed successfully"
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/rotate_managed_key.rb b/hbase-shell/src/main/ruby/shell/commands/rotate_managed_key.rb
new file mode 100644
index 000000000000..6372d30839e5
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/rotate_managed_key.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # RotateManagedKey is a class that provides a Ruby interface to rotate a managed key via
+ # HBase Key Management API.
+ class RotateManagedKey < KeymetaCommandBase
+ def help
+ <<-EOF
+Rotate the ACTIVE managed key for a given cust:namespace (cust Base64 encoded).
+If no namespace is specified, the global namespace (*) is used.
+
+Example:
+ hbase> rotate_managed_key 'cust:namespace'
+ hbase> rotate_managed_key 'cust'
+ EOF
+ end
+
+ def command(key_info)
+ statuses = [keymeta_admin.rotate_managed_key(key_info)]
+ print_key_statuses(statuses)
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/rotate_stk.rb b/hbase-shell/src/main/ruby/shell/commands/rotate_stk.rb
new file mode 100644
index 000000000000..f1c754487c40
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/rotate_stk.rb
@@ -0,0 +1,51 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # RotateStk is a class that provides a Ruby interface to rotate the System Key (STK)
+ # via HBase Key Management API.
+ class RotateStk < KeymetaCommandBase
+ def help
+ <<-EOF
+Rotate the System Key (STK) if a new key is detected.
+This command checks for a new system key and propagates it to all region servers.
+Returns true if a new key was detected and rotated, false otherwise.
+
+Example:
+ hbase> rotate_stk
+ EOF
+ end
+
+ def command
+ result = keymeta_admin.rotate_stk
+ if result
+ formatter.row(['System Key rotation was performed successfully and cache was refreshed ' \
+ 'on all region servers'])
+ else
+ formatter.row(['No System Key change was detected'])
+ end
+ result
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb b/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb
new file mode 100644
index 000000000000..d3670d094ed3
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # ShowKeyStatus is a class that provides a Ruby interface to show key statuses via
+ # HBase Key Management API.
+ class ShowKeyStatus < KeymetaCommandBase
+ def help
+ <<-EOF
+Show key statuses for a given cust:namespace (cust in Base64 format).
+If no namespace is specified, the global namespace (*) is used.
+
+Example:
+ hbase> show_key_status 'cust:namespace'
+ hbase> show_key_status 'cust'
+ EOF
+ end
+
+ def command(key_info)
+ statuses = keymeta_admin.get_key_statuses(key_info)
+ print_key_statuses(statuses)
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java
new file mode 100644
index 000000000000..8315d05f3feb
--- /dev/null
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.jruby.embed.ScriptingContainer;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ClientTests.class, IntegrationTests.class })
+public class TestKeymetaAdminShell extends ManagedKeyTestBase implements RubyShellTest {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaAdminShell.class);
+
+ private final ScriptingContainer jruby = new ScriptingContainer();
+
+ @Before
+ public void setUp() throws Exception {
+ final Configuration conf = TEST_UTIL.getConfiguration();
+ // Enable to be able to debug without timing out.
+ // conf.set("zookeeper.session.timeout", "6000000");
+ // conf.set("hbase.rpc.timeout", "6000000");
+ // conf.set("hbase.rpc.read.timeout", "6000000");
+ // conf.set("hbase.rpc.write.timeout", "6000000");
+ // conf.set("hbase.client.operation.timeout", "6000000");
+ // conf.set("hbase.client.scanner.timeout.period", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.connect", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.read", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.write", "6000000");
+ // conf.set("hbase.master.start.timeout.localHBaseCluster", "6000000");
+ // conf.set("hbase.master.init.timeout.localHBaseCluster", "6000000");
+ // conf.set("hbase.client.sync.wait.timeout.msec", "6000000");
+ // conf.set("hbase.client.retries.number", "1000");
+ Map cust_to_key = new HashMap<>();
+ Map cust_to_alias = new HashMap<>();
+ String clusterId = UUID.randomUUID().toString();
+ String SYSTEM_KEY_ALIAS = "system-key-alias";
+ String CUST1 = "cust1";
+ String CUST1_ALIAS = "cust1-alias";
+ String CF_NAMESPACE = "test_table/f";
+ String GLOB_CUST_ALIAS = "glob-cust-alias";
+ String CUSTOM_NAMESPACE = "test_namespace";
+ String CUSTOM_NAMESPACE_ALIAS = "custom-namespace-alias";
+ String CUSTOM_GLOBAL_NAMESPACE = "test_global_namespace";
+ String CUSTOM_GLOBAL_NAMESPACE_ALIAS = "custom-global-namespace-alias";
+ if (isWithKeyManagement()) {
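+ // Populate a test keystore with custodian, namespace-scoped and system key entries
+ // that the Ruby test suites expect to find.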
+ String providerParams = KeymetaTestUtils.setupTestKeyStore(TEST_UTIL, true, true, store -> {
+ Properties p = new Properties();
+ try {
+ KeymetaTestUtils.addEntry(conf, 128, store, CUST1_ALIAS, CUST1, true, cust_to_key,
+ cust_to_alias, p);
+ KeymetaTestUtils.addEntry(conf, 128, store, CUST1_ALIAS, CUST1, true, cust_to_key,
+ cust_to_alias, p, CF_NAMESPACE);
+ KeymetaTestUtils.addEntry(conf, 128, store, GLOB_CUST_ALIAS, "*", true, cust_to_key,
+ cust_to_alias, p);
+ KeymetaTestUtils.addEntry(conf, 128, store, SYSTEM_KEY_ALIAS, clusterId, true,
+ cust_to_key, cust_to_alias, p);
+ KeymetaTestUtils.addEntry(conf, 128, store, CUSTOM_NAMESPACE_ALIAS, CUST1, true,
+ cust_to_key, cust_to_alias, p, CUSTOM_NAMESPACE);
+ KeymetaTestUtils.addEntry(conf, 128, store, CUSTOM_GLOBAL_NAMESPACE_ALIAS, "*", true,
+ cust_to_key, cust_to_alias, p, CUSTOM_GLOBAL_NAMESPACE);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return p;
+ });
+ // byte[] systemKey = cust2key.get(new Bytes(clusterId.getBytes())).get();
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY, providerParams);
+ }
+ RubyShellTest.setUpConfig(this);
+ super.setUp();
+ RubyShellTest.setUpJRubyRuntime(this);
+ RubyShellTest.doTestSetup(this);
+ addCustodianRubyEnvVars(jruby, "GLOB_CUST", "*");
+ addCustodianRubyEnvVars(jruby, "CUST1", CUST1);
+ jruby.put("$TEST", this);
+ }
+
+ @Override
+ public HBaseTestingUtil getTEST_UTIL() {
+ return TEST_UTIL;
+ }
+
+ @Override
+ public ScriptingContainer getJRuby() {
+ return jruby;
+ }
+
+ @Override
+ public String getSuitePattern() {
+ return "**/*_keymeta_test.rb";
+ }
+
+ @Test
+ public void testRunShellTests() throws Exception {
+ RubyShellTest.testRunShellTests(this);
+ }
+
+ @Override
+ protected Class<? extends ManagedKeyProvider> getKeyProviderClass() {
+ return ManagedKeyStoreKeyProvider.class;
+ }
+
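+ /**
+ * Exposes the $&lt;ID&gt;, $&lt;ID&gt;_ALIAS and $&lt;ID&gt;_ENCODED globals to the embedded JRuby runtime
+ * for use by the Ruby test suites.
+ */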
+ public static void addCustodianRubyEnvVars(ScriptingContainer jruby, String custId,
+ String custodian) {
+ jruby.put("$" + custId, custodian);
+ jruby.put("$" + custId + "_ALIAS", custodian + "-alias");
+ jruby.put("$" + custId + "_ENCODED", ManagedKeyProvider.encodeToStr(custodian.getBytes()));
+ }
+}
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMigration.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMigration.java
new file mode 100644
index 000000000000..efe124989e56
--- /dev/null
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMigration.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.junit.ClassRule;
+import org.junit.experimental.categories.Category;
+
+@Category({ ClientTests.class, IntegrationTests.class })
+public class TestKeymetaMigration extends TestKeymetaAdminShell {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaMigration.class);
+
+ @Override
+ public String getSuitePattern() {
+ return "**/*_keymeta_migration_test.rb";
+ }
+
+ @Override
+ protected boolean isWithKeyManagement() {
+ return false;
+ }
+
+ @Override
+ protected boolean isWithMiniClusterStart() {
+ return false;
+ }
+
+ @Override
+ protected TableName getSystemTableNameToWaitFor() {
+ return TableName.META_TABLE_NAME;
+ }
+}
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMockProviderShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMockProviderShell.java
new file mode 100644
index 000000000000..cc4aabe4ff4e
--- /dev/null
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMockProviderShell.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.jruby.embed.ScriptingContainer;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ClientTests.class, IntegrationTests.class })
+public class TestKeymetaMockProviderShell extends ManagedKeyTestBase implements RubyShellTest {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaMockProviderShell.class);
+
+ private final ScriptingContainer jruby = new ScriptingContainer();
+
+ @Before
+ @Override
+ public void setUp() throws Exception {
+ // Enable to be able to debug without timing out.
+ // final Configuration conf = TEST_UTIL.getConfiguration();
+ // conf.set("zookeeper.session.timeout", "6000000");
+ // conf.set("hbase.rpc.timeout", "6000000");
+ // conf.set("hbase.rpc.read.timeout", "6000000");
+ // conf.set("hbase.rpc.write.timeout", "6000000");
+ // conf.set("hbase.client.operation.timeout", "6000000");
+ // conf.set("hbase.client.scanner.timeout.period", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.connect", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.read", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.write", "6000000");
+ // conf.set("hbase.master.start.timeout.localHBaseCluster", "6000000");
+ // conf.set("hbase.master.init.timeout.localHBaseCluster", "6000000");
+ // conf.set("hbase.client.sync.wait.timeout.msec", "6000000");
+ // conf.set("hbase.client.retries.number", "1000");
+ RubyShellTest.setUpConfig(this);
+ super.setUp();
+ RubyShellTest.setUpJRubyRuntime(this);
+ RubyShellTest.doTestSetup(this);
+ jruby.put("$TEST", this);
+ }
+
+ @Override
+ public HBaseTestingUtil getTEST_UTIL() {
+ return TEST_UTIL;
+ }
+
+ @Override
+ public ScriptingContainer getJRuby() {
+ return jruby;
+ }
+
+ @Override
+ public String getSuitePattern() {
+ return "**/*_keymeta_mock_provider_test.rb";
+ }
+
+ @Test
+ public void testRunShellTests() throws Exception {
+ RubyShellTest.testRunShellTests(this);
+ }
+}
diff --git a/hbase-shell/src/test/ruby/shell/admin_keymeta_mock_provider_test.rb b/hbase-shell/src/test/ruby/shell/admin_keymeta_mock_provider_test.rb
new file mode 100644
index 000000000000..061e3fc71230
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/admin_keymeta_mock_provider_test.rb
@@ -0,0 +1,143 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+java_import org.apache.hadoop.hbase.io.crypto.Encryption
+java_import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider
+java_import org.apache.hadoop.hbase.ipc.RemoteWithExtrasException
+
+module Hbase
+ # Test class for keymeta admin functionality with MockManagedKeyProvider
+ class KeymetaAdminMockProviderTest < Test::Unit::TestCase
+ include TestHelpers
+
+ def setup
+ setup_hbase
+ @key_provider = Encryption.getManagedKeyProvider($TEST_CLUSTER.getConfiguration)
+ # Enable multikey generation mode for dynamic key creation on rotate
+ @key_provider.setMultikeyGenMode(true)
+
+ # Set up custodian variables
+ @glob_cust = '*'
+ @glob_cust_encoded = ManagedKeyProvider.encodeToStr(@glob_cust.to_java_bytes)
+ end
+
+ define_test 'Test rotate managed key operation' do
+ test_rotate_key(@glob_cust_encoded, '*')
+ test_rotate_key(@glob_cust_encoded, 'test_namespace')
+ end
+
+ def test_rotate_key(cust, namespace)
+ cust_and_namespace = "#{cust}:#{namespace}"
+ puts "Testing rotate_managed_key for #{cust_and_namespace}"
+
+ # 1. Enable key management first
+ output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+ puts "enable_key_management output: #{output}"
+ assert(output.include?("#{cust} #{namespace} ACTIVE"),
+ "Expected ACTIVE key after enable, got: #{output}")
+
+ # Verify initial state - should have 1 ACTIVE key
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status before rotation: #{output}"
+ assert(output.include?('1 row(s)'), "Expected 1 key before rotation, got: #{output}")
+
+ # 2. Rotate the managed key (mock provider will generate a new key due to multikeyGenMode)
+ output = capture_stdout { @shell.command('rotate_managed_key', cust_and_namespace) }
+ puts "rotate_managed_key output: #{output}"
+ assert(output.include?("#{cust} #{namespace}"),
+ "Expected key info in rotation output, got: #{output}")
+
+ # 3. Verify we now have both ACTIVE and INACTIVE keys
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status after rotation: #{output}"
+ assert(output.include?('ACTIVE'),
+ "Expected ACTIVE key after rotation, got: #{output}")
+ assert(output.include?('INACTIVE'),
+ "Expected INACTIVE key after rotation, got: #{output}")
+ assert(output.include?('2 row(s)'),
+ "Expected 2 keys after rotation, got: #{output}")
+
+ # 4. Rotate again to test multiple rotations
+ output = capture_stdout { @shell.command('rotate_managed_key', cust_and_namespace) }
+ puts "rotate_managed_key (second) output: #{output}"
+ assert(output.include?("#{cust} #{namespace}"),
+ "Expected key info in second rotation output, got: #{output}")
+
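+ # Returns ruby's KeymetaAdmin class from keymeta_admin.rb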
+ # Should now have 3 keys: 1 ACTIVE, 2 INACTIVE
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status after second rotation: #{output}"
+ assert(output.include?('3 row(s)'),
+ "Expected 3 keys after second rotation, got: #{output}")
+
+ # Cleanup - disable all keys
+ @shell.command('disable_key_management', cust_and_namespace)
+ end
+
+ define_test 'Test rotate without active key fails' do
+ cust_and_namespace = "#{@glob_cust_encoded}:nonexistent_namespace"
+ puts "Testing rotate_managed_key on non-existent namespace"
+
+ # Attempt to rotate when no key management is enabled should fail
+ e = assert_raises(RemoteWithExtrasException) do
+ @shell.command('rotate_managed_key', cust_and_namespace)
+ end
+ assert_true(e.is_do_not_retry)
+ end
+
+ define_test 'Test refresh managed keys with mock provider' do
+ cust_and_namespace = "#{@glob_cust_encoded}:test_refresh"
+ puts "Testing refresh_managed_keys for #{cust_and_namespace}"
+
+ # 1. Enable key management
+ output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+ puts "enable_key_management output: #{output}"
+ assert(output.include?("#{@glob_cust_encoded} test_refresh ACTIVE"))
+
+ # 2. Rotate to create multiple keys
+ output = capture_stdout { @shell.command('rotate_managed_key', cust_and_namespace) }
+ puts "rotate_managed_key output: #{output}"
+ assert(output.include?("#{@glob_cust_encoded} test_refresh"),
+ "Expected key info in rotation output, got: #{output}")
+
+ # 3. Refresh managed keys - should succeed without changing state
+ output = capture_stdout { @shell.command('refresh_managed_keys', cust_and_namespace) }
+ puts "refresh_managed_keys output: #{output}"
+ assert(output.include?('Managed keys refreshed successfully'),
+ "Expected success message, got: #{output}")
+
+ # Verify keys still exist after refresh
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ assert(output.include?('ACTIVE'), "Expected ACTIVE key after refresh")
+ assert(output.include?('INACTIVE'), "Expected INACTIVE key after refresh")
+
+ # Cleanup
+ @shell.command('disable_key_management', cust_and_namespace)
+ end
+ end
+end
+
diff --git a/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
new file mode 100644
index 000000000000..ab413ecbb0bb
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
@@ -0,0 +1,193 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+module Hbase
+ # Test class for keymeta admin functionality
+ class KeymetaAdminTest < Test::Unit::TestCase
+ include TestHelpers
+
+ def setup
+ setup_hbase
+ end
+
+ define_test 'Test enable key management' do
+ test_key_management($CUST1_ENCODED, '*')
+ test_key_management($CUST1_ENCODED, 'test_table/f')
+ test_key_management($CUST1_ENCODED, 'test_namespace')
+ test_key_management($GLOB_CUST_ENCODED, '*')
+
+ puts 'Testing that cluster can be restarted when key management is enabled'
+ $TEST.restartMiniCluster
+ puts 'Cluster restarted, testing key management again'
+ setup_hbase
+ test_key_management($GLOB_CUST_ENCODED, '*')
+ puts 'Key management test complete'
+ end
+
+ def test_key_management(cust, namespace)
+ # Repeat the enable twice in a loop and ensure multiple enables succeed and return the
+ # same output.
+ 2.times do
+ cust_and_namespace = "#{cust}:#{namespace}"
+ output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+ puts "enable_key_management output: #{output}"
+ assert(output.include?("#{cust} #{namespace} ACTIVE"))
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status output: #{output}"
+ assert(output.include?("#{cust} #{namespace} ACTIVE"))
+ assert(output.include?('1 row(s)'))
+ end
+ end
+
+ define_test 'Decode failure raises friendly error' do
+ assert_raises(ArgumentError) do
+ @shell.command('enable_key_management', '!!!:namespace')
+ end
+
+ error = assert_raises(ArgumentError) do
+ @shell.command('show_key_status', '!!!:namespace')
+ end
+ assert_match(/Failed to decode Base64 encoded string '!!!'/, error.message)
+ end
+
+ define_test 'Test key management operations without rotation' do
+ test_key_operations($CUST1_ENCODED, '*')
+ test_key_operations($CUST1_ENCODED, 'test_namespace')
+ test_key_operations($GLOB_CUST_ENCODED, '*')
+ end
+
+ def test_key_operations(cust, namespace)
+ cust_and_namespace = "#{cust}:#{namespace}"
+ puts "Testing key management operations for #{cust_and_namespace}"
+
+ # 1. Enable key management
+ output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+ puts "enable_key_management output: #{output}"
+ assert(output.include?("#{cust} #{namespace} ACTIVE"),
+ "Expected ACTIVE key after enable, got: #{output}")
+
+ # 2. Get the initial key metadata hash for use in disable_managed_key test
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status output: #{output}"
+ # Extract the key metadata hash from the output
+ # Output format: ENCODED-KEY NAMESPACE STATUS METADATA METADATA-HASH REFRESH-TIMESTAMP
+ lines = output.split("\n")
+ key_line = lines.find { |line| line.include?(cust) && line.include?(namespace) }
+ assert_not_nil(key_line, "Could not find key line in output")
+ # Parse the key metadata hash (Base64 encoded)
+ key_metadata_hash = key_line.split[3]
+ assert_not_nil(key_metadata_hash, "Could not extract key metadata hash")
+ puts "Extracted key metadata hash: #{key_metadata_hash}"
+
+ # 3. Refresh managed keys
+ output = capture_stdout { @shell.command('refresh_managed_keys', cust_and_namespace) }
+ puts "refresh_managed_keys output: #{output}"
+ assert(output.include?('Managed keys refreshed successfully'),
+ "Expected success message, got: #{output}")
+ # Verify keys still exist after refresh
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status after refresh: #{output}"
+ assert(output.include?('ACTIVE'), "Expected ACTIVE key after refresh, got: #{output}")
+
+ # 4. Disable a specific managed key
+ output = capture_stdout do
+ @shell.command('disable_managed_key', cust_and_namespace, key_metadata_hash)
+ end
+ puts "disable_managed_key output: #{output}"
+ assert(output.include?("#{cust} #{namespace} DISABLED"),
+ "Expected INACTIVE key, got: #{output}")
+ # Verify the key is now INACTIVE
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status after disable_managed_key: #{output}"
+ assert(output.include?('DISABLED'), "Expected DISABLED state, got: #{output}")
+
+ # 5. Re-enable key management for next step
+ @shell.command('enable_key_management', cust_and_namespace)
+
+ # 6. Disable all key management
+ output = capture_stdout { @shell.command('disable_key_management', cust_and_namespace) }
+ puts "disable_key_management output: #{output}"
+ assert(output.include?("#{cust} #{namespace} DISABLED"),
+ "Expected DISABLED keys, got: #{output}")
+ # Verify all keys are now INACTIVE
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status after disable_key_management: #{output}"
+ # All rows should show INACTIVE state
+ lines = output.split("\n")
+ key_lines = lines.select { |line| line.include?(cust) && line.include?(namespace) }
+ key_lines.each do |line|
+ assert(line.include?('INACTIVE'), "Expected all keys to be INACTIVE, but found: #{line}")
+ end
+
+ # 7. Refresh shouldn't do anything since the key management is disabled.
+ output = capture_stdout do
+ @shell.command('refresh_managed_keys', cust_and_namespace)
+ end
+ puts "refresh_managed_keys output: #{output}"
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status after refresh_managed_keys: #{output}"
+ assert(!output.include?(' ACTIVE '), "Expected all keys to be INACTIVE, but found: #{output}")
+
+ # 8. Enable key management again
+ @shell.command('enable_key_management', cust_and_namespace)
+
+ # 9. Get the key metadata hash for the enabled key
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status after enable_key_management: #{output}"
+ assert(output.include?('ACTIVE'),
+ "Expected ACTIVE key after enable_key_management, got: #{output}")
+ assert(output.include?('1 row(s)'))
+ end
+
+ define_test 'Test refresh error handling' do
+ # Test refresh on non-existent key management (should not fail, just no-op)
+ cust_and_namespace = "#{$CUST1_ENCODED}:nonexistent_namespace"
+ output = capture_stdout do
+ @shell.command('refresh_managed_keys', cust_and_namespace)
+ end
+ puts "refresh_managed_keys on non-existent namespace: #{output}"
+ assert(output.include?('Managed keys refreshed successfully'),
+ "Expected success message even for non-existent namespace, got: #{output}")
+ end
+
+ define_test 'Test disable operations error handling' do
+ # Test disable_managed_key with invalid metadata hash
+ cust_and_namespace = "#{$CUST1_ENCODED}:*"
+ error = assert_raises(ArgumentError) do
+ @shell.command('disable_managed_key', cust_and_namespace, '!!!invalid!!!')
+ end
+ assert_match(/Failed to decode Base64 encoded string '!!!invalid!!!'/, error.message)
+
+ # Test disable_key_management on non-existent namespace (should succeed, no-op)
+ cust_and_namespace = "#{$CUST1_ENCODED}:nonexistent_for_disable"
+ output = capture_stdout { @shell.command('disable_key_management', cust_and_namespace) }
+ puts "disable_key_management on non-existent namespace: #{output}"
+ # A single DISABLED row is reported even though no keys existed before
+ assert(output.include?('1 row(s)'))
+ assert(output.include?(" DISABLED "), "Expected DISABLED key, got: #{output}")
+ end
+ end
+end
diff --git a/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
new file mode 100644
index 000000000000..35ad85785e0f
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
@@ -0,0 +1,177 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+java_import org.apache.hadoop.conf.Configuration
+java_import org.apache.hadoop.fs.FSDataInputStream
+java_import org.apache.hadoop.hbase.CellUtil
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.client.Get
+java_import org.apache.hadoop.hbase.io.crypto.Encryption
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider
+java_import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider
+java_import org.apache.hadoop.hbase.io.hfile.CorruptHFileException
+java_import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
+java_import org.apache.hadoop.hbase.io.hfile.HFile
+java_import org.apache.hadoop.hbase.io.hfile.CacheConfig
+java_import org.apache.hadoop.hbase.util.Bytes
+
+module Hbase
+ # Test class for encrypted table keymeta functionality
+ class EncryptedTableKeymetaTest < Test::Unit::TestCase
+ include TestHelpers
+
+ def setup
+ setup_hbase
+ @test_table = "enctest#{Time.now.to_i}"
+ @connection = $TEST_CLUSTER.connection
+ end
+
+ define_test 'Test table put/get with encryption' do
+ # Custodian is currently not supported, so this will end up falling back to local key
+ # generation.
+ test_table_put_get_with_encryption($CUST1_ENCODED, '*',
+ { 'NAME' => 'f', 'ENCRYPTION' => 'AES' },
+ true)
+ end
+
+ define_test 'Test table with custom namespace attribute in Column Family' do
+ custom_namespace = 'test_global_namespace'
+ test_table_put_get_with_encryption(
+ $GLOB_CUST_ENCODED, custom_namespace,
+ { 'NAME' => 'f', 'ENCRYPTION' => 'AES', 'ENCRYPTION_KEY_NAMESPACE' => custom_namespace },
+ false
+ )
+ end
+
+ def test_table_put_get_with_encryption(cust, namespace, table_attrs, fallback_scenario)
+ cust_and_namespace = "#{cust}:#{namespace}"
+ output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+ assert(output.include?("#{cust} #{namespace} ACTIVE"))
+ @shell.command(:create, @test_table, table_attrs)
+ test_table = table(@test_table)
+ test_table.put('1', 'f:a', '2')
+ puts "Added a row, now flushing table #{@test_table}"
+ command(:flush, @test_table)
+
+ table_name = TableName.valueOf(@test_table)
+ store_file_info = nil
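+ # Locate the store file written by the flush so its trailer can be inspected.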
+ $TEST_CLUSTER.getRSForFirstRegionInTable(table_name).getRegions(table_name).each do |region|
+ region.getStores.each do |store|
+ store.getStorefiles.each do |storefile|
+ store_file_info = storefile.getFileInfo
+ end
+ end
+ end
+ assert_not_nil(store_file_info)
+ hfile_info = store_file_info.getHFileInfo
+ assert_not_nil(hfile_info)
+ live_trailer = hfile_info.getTrailer
+ assert_trailer(live_trailer)
+ assert_equal(namespace, live_trailer.getKeyNamespace)
+
+ # When the active key is supposed to be used, we can validate the key bytes in the context
+ # against the actual key from the provider.
+ unless fallback_scenario
+ encryption_context = hfile_info.getHFileContext.getEncryptionContext
+ assert_not_nil(encryption_context)
+ assert_not_nil(encryption_context.getKeyBytes)
+ key_provider = Encryption.getManagedKeyProvider($TEST_CLUSTER.getConfiguration)
+ key_data = key_provider.getManagedKey(ManagedKeyProvider.decodeToBytes(cust), namespace)
+ assert_not_nil(key_data)
+ assert_equal(namespace, key_data.getKeyNamespace)
+ assert_equal(key_data.getTheKey.getEncoded, encryption_context.getKeyBytes)
+ end
+
+ ## Disable table to ensure that the stores are not cached.
+ command(:disable, @test_table)
+ assert(!command(:is_enabled, @test_table))
+
+ # Open FSDataInputStream to the path pointed to by the store_file_info
+ fs = store_file_info.getFileSystem
+ fio = fs.open(store_file_info.getPath)
+ assert_not_nil(fio)
+ # Read the trailer using FixedFileTrailer
+ offline_trailer = FixedFileTrailer.readFromStream(
+ fio, fs.getFileStatus(store_file_info.getPath).getLen
+ )
+ fio.close
+ assert_trailer(offline_trailer, live_trailer)
+
+ # Test the ability to read an encrypted HFile offline
+ reader = HFile.createReader(fs, store_file_info.getPath, CacheConfig::DISABLED, true,
+ $TEST_CLUSTER.getConfiguration)
+ assert_not_nil(reader)
+ offline_trailer = reader.getTrailer
+ assert_trailer(offline_trailer, live_trailer)
+ scanner = reader.getScanner($TEST_CLUSTER.getConfiguration, false, false)
+ assert_true(scanner.seekTo)
+ cell = scanner.getCell
+ assert_equal('1', Bytes.toString(CellUtil.cloneRow(cell)))
+ assert_equal('2', Bytes.toString(CellUtil.cloneValue(cell)))
+ assert_false(scanner.next)
+
+ # Confirm that offline reading fails without the proper key provider configuration
+ Encryption.clearKeyProviderCache
+ conf = Configuration.new($TEST_CLUSTER.getConfiguration)
+ conf.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.java_class.getName)
+ # This is expected to fail with CorruptHFileException.
+ e = assert_raises(CorruptHFileException) do
+ reader = HFile.createReader(fs, store_file_info.getPath, CacheConfig::DISABLED, true, conf)
+ end
+ assert_true(e.message.include?(
+ "Problem reading HFile Trailer from file #{store_file_info.getPath}"
+ ))
+ Encryption.clearKeyProviderCache
+
+ # Re-enable the table so it can be queried.
+ command(:enable, @test_table)
+ assert(command(:is_enabled, @test_table))
+
+ get = Get.new(Bytes.toBytes('1'))
+ res = test_table.table.get(get)
+ puts "res for row '1' and column f:a: #{res}"
+ assert_false(res.isEmpty)
+ assert_equal('2', Bytes.toString(res.getValue(Bytes.toBytes('f'), Bytes.toBytes('a'))))
+ end
+
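+ # Asserts that the managed-key fields of the trailer are populated; when live_trailer is
+ # given, also checks that the offline trailer matches it field by field.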
+ def assert_trailer(offline_trailer, live_trailer = nil)
+ assert_not_nil(offline_trailer)
+ assert_not_nil(offline_trailer.getEncryptionKey)
+ assert_not_nil(offline_trailer.getKEKMetadata)
+ assert_not_nil(offline_trailer.getKEKChecksum)
+ assert_not_nil(offline_trailer.getKeyNamespace)
+
+ return unless live_trailer
+
+ assert_equal(live_trailer.getEncryptionKey, offline_trailer.getEncryptionKey)
+ assert_equal(live_trailer.getKEKMetadata, offline_trailer.getKEKMetadata)
+ assert_equal(live_trailer.getKEKChecksum, offline_trailer.getKEKChecksum)
+ assert_equal(live_trailer.getKeyNamespace, offline_trailer.getKeyNamespace)
+ end
+ end
+end
diff --git a/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb b/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb
new file mode 100644
index 000000000000..d527eea8240c
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb
@@ -0,0 +1,663 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+require 'tempfile'
+require 'fileutils'
+
+java_import org.apache.hadoop.conf.Configuration
+java_import org.apache.hadoop.fs.FSDataInputStream
+java_import org.apache.hadoop.hbase.CellUtil
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.TableName
+java_import org.apache.hadoop.hbase.client.Get
+java_import org.apache.hadoop.hbase.client.Scan
+java_import org.apache.hadoop.hbase.io.crypto.Encryption
+java_import org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider
+java_import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
+java_import org.apache.hadoop.hbase.io.hfile.HFile
+java_import org.apache.hadoop.hbase.io.hfile.CacheConfig
+java_import org.apache.hadoop.hbase.util.Bytes
+java_import org.apache.hadoop.hbase.keymeta.KeymetaServiceEndpoint
+java_import org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor
+java_import org.apache.hadoop.hbase.security.EncryptionUtil
+java_import java.security.KeyStore
+java_import java.security.MessageDigest
+java_import javax.crypto.spec.SecretKeySpec
+java_import java.io.FileOutputStream
+java_import java.net.URLEncoder
+java_import java.util.Base64
+
+module Hbase
+ # Test class for key provider migration functionality
+ class KeyProviderKeymetaMigrationTest < Test::Unit::TestCase
+ include TestHelpers
+
+ def setup
+ @test_timestamp = Time.now.to_i.to_s
+ @master_key_alias = 'masterkey'
+ @shared_key_alias = 'sharedkey'
+ @table_key_alias = 'tablelevelkey'
+ @cf_key1_alias = 'cfkey1'
+ @cf_key2_alias = 'cfkey2'
+ @keystore_password = 'password'
+
+ # Test table names
+ @table_no_encryption = "no_enc_#{@test_timestamp}"
+ @table_random_key = "random_key_#{@test_timestamp}"
+ @table_table_key = "table_key_#{@test_timestamp}"
+ @table_shared_key1 = "shared1_#{@test_timestamp}"
+ @table_shared_key2 = "shared2_#{@test_timestamp}"
+ @table_cf_keys = "cf_keys_#{@test_timestamp}"
+
+ # Unified table metadata with CFs and expected namespaces
+ @tables_metadata = {
+ @table_no_encryption => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => nil },
+ no_encryption: true
+ },
+ @table_random_key => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => nil }
+ },
+ @table_table_key => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => @table_table_key }
+ },
+ @table_shared_key1 => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => 'shared-global-key' }
+ },
+ @table_shared_key2 => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => 'shared-global-key' }
+ },
+ @table_cf_keys => {
+ cfs: %w[cf1 cf2],
+ expected_namespace: {
+ 'cf1' => "#{@table_cf_keys}/cf1",
+ 'cf2' => "#{@table_cf_keys}/cf2"
+ }
+ }
+ }
+
+ # Setup initial KeyStoreKeyProvider
+ setup_old_key_provider
+ puts ' >> Starting Cluster'
+ $TEST.startMiniCluster
+ puts ' >> Cluster started'
+
+ setup_hbase
+ end
+
+ define_test 'Test complete key provider migration' do
+ puts "\n=== Starting Key Provider Migration Test ==="
+
+ # Step 1-3: Setup old provider and create tables
+ create_test_tables
+ puts "\n--- Validating initial table operations ---"
+ validate_pre_migration_operations(false)
+
+ # Step 4: Setup new provider and restart
+ setup_new_key_provider
+ restart_cluster_and_validate
+
+ # Step 5: Perform migration
+ migrate_tables_step_by_step
+
+ # Step 6: Cleanup and final validation
+ cleanup_old_provider_and_validate
+
+ puts "\n=== Migration Test Completed Successfully ==="
+ end
+
+ private
+
+ def setup_old_key_provider
+ puts "\n--- Setting up old KeyStoreKeyProvider ---"
+
+ # Use proper test directory (similar to KeymetaTestUtils.setupTestKeyStore)
+ test_data_dir = $TEST_CLUSTER.getDataTestDir("old_keystore_#{@test_timestamp}").toString
+ FileUtils.mkdir_p(test_data_dir)
+ @old_keystore_file = File.join(test_data_dir, 'keystore.jceks')
+ puts " >> Old keystore file: #{@old_keystore_file}"
+
+ # Create keystore with only the master key
+ # ENCRYPTION_KEY attributes generate their own keys and don't use keystore entries
+ create_keystore(@old_keystore_file, { @master_key_alias => generate_key(@master_key_alias) })
+
+ # Configure old KeyStoreKeyProvider
+ provider_uri = "jceks://#{File.expand_path(@old_keystore_file)}?" \
+ "password=#{@keystore_password}"
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY,
+ KeyStoreKeyProvider.java_class.name)
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_KEYPROVIDER_PARAMETERS_KEY,
+ provider_uri)
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MASTERKEY_NAME_CONF_KEY,
+ @master_key_alias)
+
+ puts " >> Old KeyStoreKeyProvider configured with keystore: #{@old_keystore_file}"
+ end
+
+ def create_test_tables
+ puts "\n--- Creating test tables ---"
+
+ # 1. Table without encryption
+ command(:create, @table_no_encryption, { 'NAME' => 'f' })
+ puts " >> Created table #{@table_no_encryption} without encryption"
+
+ # 2. Table with random key (no explicit key set)
+ command(:create, @table_random_key, { 'NAME' => 'f', 'ENCRYPTION' => 'AES' })
+ puts " >> Created table #{@table_random_key} with random key"
+
+ # 3. Table with table-level key
+ command(:create, @table_table_key, { 'NAME' => 'f', 'ENCRYPTION' => 'AES',
+ 'ENCRYPTION_KEY' => @table_key_alias })
+ puts " >> Created table #{@table_table_key} with table-level key"
+
+ # 4. First table with shared key
+ command(:create, @table_shared_key1, { 'NAME' => 'f', 'ENCRYPTION' => 'AES',
+ 'ENCRYPTION_KEY' => @shared_key_alias })
+ puts " >> Created table #{@table_shared_key1} with shared key"
+
+ # 5. Second table with shared key
+ command(:create, @table_shared_key2, { 'NAME' => 'f', 'ENCRYPTION' => 'AES',
+ 'ENCRYPTION_KEY' => @shared_key_alias })
+ puts " >> Created table #{@table_shared_key2} with shared key"
+
+ # 6. Table with column family specific keys
+ command(:create, @table_cf_keys,
+ { 'NAME' => 'cf1', 'ENCRYPTION' => 'AES', 'ENCRYPTION_KEY' => @cf_key1_alias },
+ { 'NAME' => 'cf2', 'ENCRYPTION' => 'AES', 'ENCRYPTION_KEY' => @cf_key2_alias })
+ puts " >> Created table #{@table_cf_keys} with CF-specific keys"
+ end
+
+ def validate_pre_migration_operations(is_key_management_enabled)
+ @tables_metadata.each do |table_name, metadata|
+ puts " >> test_table_operations on table: #{table_name} with CFs: " \
+ "#{metadata[:cfs].join(', ')}"
+ next if metadata[:no_encryption]
+
+ test_table_operations(table_name, metadata[:cfs])
+ check_hfile_trailers_pre_migration(table_name, metadata[:cfs], is_key_management_enabled)
+ end
+ end
+
+ def test_table_operations(table_name, column_families)
+ puts " >> Testing operations on table #{table_name}"
+
+ test_table = table(table_name)
+
+ column_families.each do |cf|
+ puts " >> Running put operations on CF: #{cf} in table: #{table_name}"
+ # Put data
+ test_table.put('row1', "#{cf}:col1", 'value1')
+ test_table.put('row2', "#{cf}:col2", 'value2')
+ end
+
+ # Flush table
+ puts " >> Flushing table: #{table_name}"
+ $TEST_CLUSTER.flush(TableName.valueOf(table_name))
+
+ # Get data and validate
+ column_families.each do |cf|
+ puts " >> Validating data in CF: #{cf} in table: #{table_name}"
+ get_result = test_table.table.get(Get.new(Bytes.toBytes('row1')))
+ assert_false(get_result.isEmpty)
+ assert_equal('value1',
+ Bytes.toString(get_result.getValue(Bytes.toBytes(cf), Bytes.toBytes('col1'))))
+ end
+
+ puts " >> Operations validated for #{table_name}"
+ end
+
+ def setup_new_key_provider
+ puts "\n--- Setting up new ManagedKeyStoreKeyProvider ---"
+
+ # Use proper test directory (similar to KeymetaTestUtils.setupTestKeyStore)
+ test_data_dir = $TEST_CLUSTER.getDataTestDir("new_keystore_#{@test_timestamp}").toString
+ FileUtils.mkdir_p(test_data_dir)
+ @new_keystore_file = File.join(test_data_dir, 'managed_keystore.jceks')
+ puts " >> New keystore file: #{@new_keystore_file}"
+
+ # Extract wrapped keys from encrypted tables and unwrap them
+ migrated_keys = extract_and_unwrap_keys_from_tables
+
+ # Create new keystore with migrated keys
+ create_keystore(@new_keystore_file, migrated_keys)
+
+ # Configure ManagedKeyStoreKeyProvider
+ provider_uri = "jceks://#{File.expand_path(@new_keystore_file)}?" \
+ "password=#{@keystore_password}"
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, 'true')
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ ManagedKeyStoreKeyProvider.java_class.name)
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY,
+ provider_uri)
+ $TEST_CLUSTER.getConfiguration.set(
+ HConstants::CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY,
+ 'system_key'
+ )
+
+ # Setup key configurations for ManagedKeyStoreKeyProvider
+ # Shared key configuration
+ $TEST_CLUSTER.getConfiguration.set(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.shared-global-key.alias",
+ 'shared_global_key'
+ )
+ $TEST_CLUSTER.getConfiguration.setBoolean(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.shared-global-key.active",
+ true
+ )
+
+ # Table-level key configuration - let system determine namespace automatically
+ $TEST_CLUSTER.getConfiguration.set(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_table_key}.alias",
+ "#{@table_table_key}_key"
+ )
+ $TEST_CLUSTER.getConfiguration.setBoolean(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_table_key}.active",
+ true
+ )
+
+ # CF-level key configurations - let system determine namespace automatically
+ $TEST_CLUSTER.getConfiguration.set(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf1.alias",
+ "#{@table_cf_keys}_cf1_key"
+ )
+ $TEST_CLUSTER.getConfiguration.setBoolean(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf1.active",
+ true
+ )
+
+ $TEST_CLUSTER.getConfiguration.set(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf2.alias",
+ "#{@table_cf_keys}_cf2_key"
+ )
+ $TEST_CLUSTER.getConfiguration.setBoolean(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf2.active",
+ true
+ )
+
+ # Enable KeyMeta coprocessor
+ $TEST_CLUSTER.getConfiguration.set('hbase.coprocessor.master.classes',
+ KeymetaServiceEndpoint.java_class.name)
+
+ puts ' >> New ManagedKeyStoreKeyProvider configured'
+ end
+
+ def restart_cluster_and_validate
+ puts "\n--- Restarting cluster with managed key store key provider ---"
+
+ $TEST.restartMiniCluster(KeymetaTableAccessor::KEY_META_TABLE_NAME)
+ puts ' >> Cluster restarted with ManagedKeyStoreKeyProvider'
+ setup_hbase
+
+ # Validate key management service is functional
+ output = capture_stdout { command(:show_key_status, "#{$GLOB_CUST_ENCODED}:*") }
+ assert(output.include?('0 row(s)'), "Expected 0 rows from show_key_status, got: #{output}")
+ puts ' >> Key management service is functional'
+
+ # Test operations still work and check HFile trailers
+ puts "\n--- Validating operations after restart ---"
+ validate_pre_migration_operations(true)
+ end
+
+ def check_hfile_trailers_pre_migration(table_name, column_families, is_key_management_enabled)
+ puts " >> Checking HFile trailers for #{table_name} with CFs: " \
+ "#{column_families.join(', ')}"
+
+ column_families.each do |cf_name|
+ validate_hfile_trailer(table_name, cf_name, false, is_key_management_enabled, false)
+ end
+ end
+
+ def migrate_tables_step_by_step
+ puts "\n--- Performing step-by-step table migration ---"
+
+ # Migrate shared key tables first
+ migrate_shared_key_tables
+
+ # Migrate table-level key
+ migrate_table_level_key
+
+ # Migrate CF-level keys
+ migrate_cf_level_keys
+ end
+
+ def migrate_shared_key_tables
+ puts "\n--- Migrating shared key tables ---"
+
+ # Enable key management for shared global key
+ cust_and_namespace = "#{$GLOB_CUST_ENCODED}:shared-global-key"
+ output = capture_stdout { command(:enable_key_management, cust_and_namespace) }
+ assert(output.include?("#{$GLOB_CUST_ENCODED} shared-global-key ACTIVE"),
+ "Expected ACTIVE status for shared key, got: #{output}")
+ puts ' >> Enabled key management for shared global key'
+
+ # Migrate first shared key table
+ migrate_table_to_managed_key(@table_shared_key1, 'f', 'shared-global-key',
+ use_namespace_attribute: true)
+
+ # Migrate second shared key table
+ migrate_table_to_managed_key(@table_shared_key2, 'f', 'shared-global-key',
+ use_namespace_attribute: true)
+ end
+
+ def migrate_table_level_key
+ puts "\n--- Migrating table-level key ---"
+
+ # Enable key management for table namespace
+ cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{@table_table_key}"
+ output = capture_stdout { command(:enable_key_management, cust_and_namespace) }
+ assert(output.include?("#{$GLOB_CUST_ENCODED} #{@table_table_key} ACTIVE"),
+ "Expected ACTIVE status for table key, got: #{output}")
+ puts ' >> Enabled key management for table-level key'
+
+ # Migrate the table - no namespace attribute, let system auto-determine
+ migrate_table_to_managed_key(@table_table_key, 'f', @table_table_key)
+ end
+
+ def migrate_cf_level_keys
+ puts "\n--- Migrating CF-level keys ---"
+
+ # Enable key management for CF1
+ cf1_namespace = "#{@table_cf_keys}/cf1"
+ cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{cf1_namespace}"
+ output = capture_stdout { command(:enable_key_management, cust_and_namespace) }
+ assert(output.include?("#{$GLOB_CUST_ENCODED} #{cf1_namespace} ACTIVE"),
+ "Expected ACTIVE status for CF1 key, got: #{output}")
+ puts ' >> Enabled key management for CF1'
+
+ # Enable key management for CF2
+ cf2_namespace = "#{@table_cf_keys}/cf2"
+ cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{cf2_namespace}"
+ output = capture_stdout { command(:enable_key_management, cust_and_namespace) }
+ assert(output.include?("#{$GLOB_CUST_ENCODED} #{cf2_namespace} ACTIVE"),
+ "Expected ACTIVE status for CF2 key, got: #{output}")
+ puts ' >> Enabled key management for CF2'
+
+ # Migrate CF1
+ migrate_table_to_managed_key(@table_cf_keys, 'cf1', cf1_namespace)
+
+ # Migrate CF2
+ migrate_table_to_managed_key(@table_cf_keys, 'cf2', cf2_namespace)
+ end
+
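+ # Migrates a single CF to managed keys: drops ENCRYPTION_KEY (optionally setting an explicit
+ # ENCRYPTION_KEY_NAMESPACE), waits for the asynchronous schema change, re-reads the existing
+ # rows, then writes and flushes new data and validates the resulting HFile trailer.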
+ def migrate_table_to_managed_key(table_name, cf_name, namespace,
+ use_namespace_attribute: false)
+ puts " >> Migrating table #{table_name}, CF #{cf_name} to namespace #{namespace}"
+
+ # Use atomic alter operation to remove ENCRYPTION_KEY and optionally add
+ # ENCRYPTION_KEY_NAMESPACE
+ if use_namespace_attribute
+ # For shared key tables: remove ENCRYPTION_KEY and add ENCRYPTION_KEY_NAMESPACE atomically
+ command(:alter, table_name,
+ { 'NAME' => cf_name,
+ 'CONFIGURATION' => { 'ENCRYPTION_KEY' => '',
+ 'ENCRYPTION_KEY_NAMESPACE' => namespace } })
+ else
+ # For table/CF level keys: just remove ENCRYPTION_KEY, let system auto-determine namespace
+ command(:alter, table_name,
+ { 'NAME' => cf_name, 'CONFIGURATION' => { 'ENCRYPTION_KEY' => '' } })
+ end
+
+ puts " >> Altered #{table_name} CF #{cf_name} to use namespace #{namespace}"
+
+ # The CF alter triggers an online schema change that reopens the stores and reinitializes
+ # the encryption context, but it is asynchronous and may take some time, so we sleep for 5s,
+ # following the same pattern as TestEncryptionKeyRotation.testCFKeyRotation().
+ sleep(5)
+
+ # Scan all existing data to verify accessibility
+ scan_and_validate_table(table_name)
+
+ # Add new data
+ test_table = table(table_name)
+ test_table.put('new_row', "#{cf_name}:new_col", 'new_value')
+
+ # Flush and validate trailer
+ $TEST_CLUSTER.flush(TableName.valueOf(table_name))
+ validate_hfile_trailer(table_name, cf_name, true, true, false, namespace)
+
+ puts " >> Migration completed for #{table_name} CF #{cf_name}"
+ end
+
+ def scan_and_validate_table(table_name)
+ puts " >> Scanning and validating existing data in #{table_name}"
+
+ test_table = table(table_name)
+ scan = Scan.new
+ scanner = test_table.table.getScanner(scan)
+
+ row_count = 0
+ while (result = scanner.next)
+ row_count += 1
+ assert_false(result.isEmpty)
+ end
+ scanner.close
+
+ assert(row_count.positive?, "Expected to find existing data in #{table_name}")
+ puts " >> Found #{row_count} rows, all accessible"
+ end
+
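+ # is_post_migration: the trailer should carry expected_namespace; is_key_management_enabled:
+ # KEK metadata and a non-zero checksum are expected; is_compacted: exactly one store file
+ # should remain.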
+ def validate_hfile_trailer(table_name, cf_name, is_post_migration, is_key_management_enabled,
+ is_compacted, expected_namespace = nil)
+ context = is_post_migration ? 'migrated' : 'pre-migration'
+ puts " >> Validating HFile trailer for #{context} table #{table_name}, CF: #{cf_name}"
+
+ table_name_obj = TableName.valueOf(table_name)
+ region_server = $TEST_CLUSTER.getRSForFirstRegionInTable(table_name_obj)
+ regions = region_server.getRegions(table_name_obj)
+
+ regions.each do |region|
+ region.getStores.each do |store|
+ next unless store.getColumnFamilyName == cf_name
+
+ puts " >> store file count for CF: #{cf_name} in table: #{table_name} is " \
+ "#{store.getStorefiles.size}"
+ if is_compacted
+ assert_equal(1, store.getStorefiles.size)
+ else
+ assert_true(!store.getStorefiles.empty?)
+ end
+ store.getStorefiles.each do |storefile|
+ puts " >> Checking HFile trailer for storefile: #{storefile.getPath.getName} " \
+ "with sequence id: #{storefile.getMaxSequenceId} against max sequence id of " \
+ "store: #{store.getMaxSequenceId.getAsLong}"
+ # The flush creates new HFiles while the old ones are still present,
+ # so make sure to check only the latest store file.
+ next unless storefile.getMaxSequenceId == store.getMaxSequenceId.getAsLong
+
+ store_file_info = storefile.getFileInfo
+ next unless store_file_info
+
+ hfile_info = store_file_info.getHFileInfo
+ next unless hfile_info
+
+ trailer = hfile_info.getTrailer
+
+ assert_not_nil(trailer.getEncryptionKey)
+
+ if is_key_management_enabled
+ assert_not_nil(trailer.getKEKMetadata)
+ assert_not_equal(0, trailer.getKEKChecksum)
+ else
+ assert_nil(trailer.getKEKMetadata)
+ assert_equal(0, trailer.getKEKChecksum)
+ end
+
+ if is_post_migration
+ assert_equal(expected_namespace, trailer.getKeyNamespace)
+ puts " >> Trailer validation passed - namespace: #{trailer.getKeyNamespace}"
+ else
+ assert_nil(trailer.getKeyNamespace)
+ puts ' >> Trailer validation passed - using legacy key format'
+ end
+ end
+ end
+ end
+ end
+
+ def cleanup_old_provider_and_validate
+ puts "\n--- Cleaning up old key provider and final validation ---"
+
+ # Remove old KeyProvider configurations
+ $TEST_CLUSTER.getConfiguration.unset(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY)
+ $TEST_CLUSTER.getConfiguration.unset(HConstants::CRYPTO_KEYPROVIDER_PARAMETERS_KEY)
+ $TEST_CLUSTER.getConfiguration.unset(HConstants::CRYPTO_MASTERKEY_NAME_CONF_KEY)
+
+ # Remove old keystore
+ FileUtils.rm_f(@old_keystore_file) if File.exist?(@old_keystore_file)
+ puts ' >> Removed old keystore and configuration'
+
+ # Restart cluster
+ $TEST.restartMiniCluster(KeymetaTableAccessor::KEY_META_TABLE_NAME)
+ puts ' >> Cluster restarted without old key provider'
+ setup_hbase
+
+ # Validate all data is still accessible
+ validate_all_tables_final
+
+ # Perform major compaction and validate
+ perform_major_compaction_and_validate
+ end
+
+ def validate_all_tables_final
+ puts "\n--- Final validation - scanning all tables ---"
+
+ @tables_metadata.each do |table_name, metadata|
+ next if metadata[:no_encryption]
+
+ puts " >> Final validation for table: #{table_name} with CFs: #{metadata[:cfs].join(', ')}"
+ scan_and_validate_table(table_name)
+ puts " >> #{table_name} - all data accessible"
+ end
+ end
+
+ def perform_major_compaction_and_validate
+ puts "\n--- Performing major compaction and final validation ---"
+
+ $TEST_CLUSTER.compact(true)
+
+ @tables_metadata.each do |table_name, metadata|
+ next if metadata[:no_encryption]
+
+ puts " >> Validating post-compaction HFiles for table: #{table_name} with " \
+ "CFs: #{metadata[:cfs].join(', ')}"
+ metadata[:cfs].each do |cf_name|
+ # When a random key wrapped with the system key is used, there is no namespace
+ validate_hfile_trailer(table_name, cf_name, true, true, true,
+ metadata[:expected_namespace][cf_name])
+ end
+ end
+ end
+
+ # Utility methods
+
+ def extract_and_unwrap_keys_from_tables
+ puts ' >> Extracting and unwrapping keys from encrypted tables'
+
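+ # Maps keystore alias => raw AES key bytes; consumed by create_keystore.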
+ keys = {}
+
+ # Reuse existing master key from old keystore as system key
+ old_key_provider = Encryption.getKeyProvider($TEST_CLUSTER.getConfiguration)
+ master_key_bytes = old_key_provider.getKey(@master_key_alias).getEncoded
+ keys['system_key'] = master_key_bytes
+
+ # Extract wrapped keys from table descriptors and unwrap them
+ # Only call extract_key_from_table for tables that have ENCRYPTION_KEY attribute
+
+ # For shared key tables (both use same key)
+ shared_key = extract_key_from_table(@table_shared_key1, 'f')
+ keys['shared_global_key'] = shared_key
+
+ # For table-level key
+ table_key = extract_key_from_table(@table_table_key, 'f')
+ keys["#{@table_table_key}_key"] = table_key
+
+ # For CF-level keys
+ cf1_key = extract_key_from_table(@table_cf_keys, 'cf1')
+ keys["#{@table_cf_keys}_cf1_key"] = cf1_key
+
+ cf2_key = extract_key_from_table(@table_cf_keys, 'cf2')
+ keys["#{@table_cf_keys}_cf2_key"] = cf2_key
+
+ puts " >> Extracted #{keys.size} keys for migration"
+ keys
+ end
+
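+ # Reads the wrapped ENCRYPTION_KEY attribute off the CF descriptor and unwraps it with the
+ # old master key, returning the raw key bytes.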
+ def extract_key_from_table(table_name, cf_name)
+ # Get table descriptor
+ admin = $TEST_CLUSTER.getAdmin
+ table_descriptor = admin.getDescriptor(TableName.valueOf(table_name))
+ cf_descriptor = table_descriptor.getColumnFamily(Bytes.toBytes(cf_name))
+
+ # Get the wrapped key bytes from ENCRYPTION_KEY attribute
+ wrapped_key_bytes = cf_descriptor.getEncryptionKey
+
+ # Use EncryptionUtil.unwrapKey with master key alias as subject
+ unwrapped_key = EncryptionUtil.unwrapKey($TEST_CLUSTER.getConfiguration,
+ @master_key_alias, wrapped_key_bytes)
+
+ unwrapped_key.getEncoded
+ end
+
+ def generate_key(alias_name)
+ MessageDigest.getInstance('SHA-256').digest(Bytes.toBytes(alias_name))
+ end
+
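+ # Builds a JCEKS keystore at keystore_path with one AES SecretKeyEntry per alias => key-bytes
+ # pair, protected by the shared test password.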
+ def create_keystore(keystore_path, key_entries)
+ store = KeyStore.getInstance('JCEKS')
+ password_chars = @keystore_password.to_java.toCharArray
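+ # Loading with a nil stream initializes an empty keystore in memory.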
+ store.load(nil, password_chars)
+
+ key_entries.each do |alias_name, key_bytes|
+ secret_key = SecretKeySpec.new(key_bytes, 'AES')
+ store.setEntry(alias_name, KeyStore::SecretKeyEntry.new(secret_key),
+ KeyStore::PasswordProtection.new(password_chars))
+ end
+
+ fos = FileOutputStream.new(keystore_path)
+ begin
+ store.store(fos, password_chars)
+ ensure
+ fos.close
+ end
+ end
+
+ def teardown
+ # Cleanup temporary test directories (keystore files will be cleaned up with the directories)
+ test_base_dir = $TEST_CLUSTER.getDataTestDir.toString
+ Dir.glob(File.join(test_base_dir, "*keystore_#{@test_timestamp}*")).each do |dir|
+ FileUtils.rm_rf(dir) if File.directory?(dir)
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/test/ruby/shell/rotate_stk_keymeta_mock_provider_test.rb b/hbase-shell/src/test/ruby/shell/rotate_stk_keymeta_mock_provider_test.rb
new file mode 100644
index 000000000000..77a2a339552e
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/rotate_stk_keymeta_mock_provider_test.rb
@@ -0,0 +1,59 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+java_import org.apache.hadoop.hbase.io.crypto.Encryption
+java_import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider
+
+module Hbase
+ # Test class for rotate_stk command
+ class RotateSTKKeymetaTest < Test::Unit::TestCase
+ include TestHelpers
+
+ def setup
+ setup_hbase
+ end
+
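+ # Exercises rotate_stk twice: first with a stable system key (no-op), then with
+ # MockManagedKeyProvider generating a fresh key per call to force a rotation.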
+ define_test 'Test rotate_stk command' do
+ puts 'Testing rotate_stk command'
+
+ # With no system key change, rotate_stk should report that no rotation was performed
+ output = capture_stdout { @shell.command(:rotate_stk) }
+ puts "rotate_stk output: #{output}"
+ assert(output.include?('No System Key change was detected'),
+ "Expected output to contain rotation status message, but got: #{output}")
+
+ key_provider = Encryption.getManagedKeyProvider($TEST_CLUSTER.getConfiguration)
+ # Once we enable multikeyGenMode on MockManagedKeyProvider, every call returns a new key,
+ # which should trigger a rotation.
+ key_provider.setMultikeyGenMode(true)
+ output = capture_stdout { @shell.command(:rotate_stk) }
+ puts "rotate_stk output: #{output}"
+ assert(output.include?('System Key rotation was performed successfully and cache was ' \
+ 'refreshed on all region servers'),
+ "Expected output to contain rotation status message, but got: #{output}")
+ end
+ end
+end
diff --git a/hbase-shell/src/test/ruby/tests_runner.rb b/hbase-shell/src/test/ruby/tests_runner.rb
index 4e31b81535a7..8f51a168a011 100644
--- a/hbase-shell/src/test/ruby/tests_runner.rb
+++ b/hbase-shell/src/test/ruby/tests_runner.rb
@@ -40,6 +40,8 @@
end
files = Dir[ File.dirname(__FILE__) + "/" + test_suite_pattern ]
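+# Fail fast when the glob matches no test files, e.g. due to a typo in the pattern.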
+raise "No tests found for #{test_suite_pattern}" if files.empty?
+
files.each do |file|
filename = File.basename(file)
if includes != nil && !includes.include?(filename)
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index dc4bc1816acc..659bfe34067a 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -89,6 +89,7 @@
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdminClient;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
@@ -201,6 +202,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/** This is for unit tests parameterized with a single boolean. */
public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
+ private Admin hbaseAdmin = null;
+
/**
* Checks to see if a specific port is available.
* @param port the port number to check for availability
@@ -2942,7 +2945,9 @@ public Admin getAdmin() throws IOException {
return hbaseAdmin;
}
- private Admin hbaseAdmin = null;
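+ /** Returns a new {@link KeymetaAdminClient} over this cluster's connection. */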
+ public KeymetaAdminClient getKeymetaAdmin() throws IOException {
+ return new KeymetaAdminClient(getConnection());
+ }
/**
* Returns an {@link Hbck} instance. Needs be closed when done.
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
index 3d5a7e502e0a..3b52b916efe8 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -1376,4 +1376,23 @@ public boolean isReplicationPeerModificationEnabled() throws IOException {
throw new NotImplementedException(
"isReplicationPeerModificationEnabled not supported in ThriftAdmin");
}
+
+ @Override
+ public void refreshSystemKeyCacheOnServers(List<ServerName> regionServers) throws IOException {
+ throw new NotImplementedException(
+ "refreshSystemKeyCacheOnServers not supported in ThriftAdmin");
+ }
+
+ @Override
+ public void ejectManagedKeyDataCacheEntryOnServers(List<ServerName> regionServers,
+ byte[] keyCustodian, String keyNamespace, String keyMetadata) throws IOException {
+ throw new NotImplementedException(
+ "ejectManagedKeyDataCacheEntryOnServers not supported in ThriftAdmin");
+ }
+
+ @Override
+ public void clearManagedKeyDataCacheOnServers(List<ServerName> regionServers) throws IOException {
+ throw new NotImplementedException(
+ "clearManagedKeyDataCacheOnServers not supported in ThriftAdmin");
+ }
}