From 211132cce11f8a96f01f71a78a17ef044b0a8730 Mon Sep 17 00:00:00 2001
From: Hari Krishna Dara
Date: Wed, 6 Aug 2025 02:33:13 +0530
Subject: [PATCH 1/9] HBASE-29402: Comprehensive key management for encryption
at rest (#7111)
---
.gitignore | 5 +
.../hbase/keymeta/KeymetaAdminClient.java | 86 +++
.../hadoop/hbase/security/EncryptionUtil.java | 56 +-
.../org/apache/hadoop/hbase/HConstants.java | 33 +
.../hadoop/hbase/io/crypto/Encryption.java | 50 +-
.../hbase/io/crypto/KeyStoreKeyProvider.java | 10 +-
.../hbase/io/crypto/ManagedKeyData.java | 289 +++++++++
.../hbase/io/crypto/ManagedKeyProvider.java | 103 +++
.../hbase/io/crypto/ManagedKeyState.java | 80 +++
.../io/crypto/ManagedKeyStoreKeyProvider.java | 106 +++
.../hadoop/hbase/keymeta/KeymetaAdmin.java | 57 ++
.../org/apache/hadoop/hbase/util/Bytes.java | 17 +-
.../hadoop/hbase/util/CommonFSUtils.java | 4 +
.../apache/hadoop/hbase/util/GsonUtil.java | 11 +
.../io/crypto/MockManagedKeyProvider.java | 167 +++++
.../io/crypto/TestKeyStoreKeyProvider.java | 65 +-
.../hbase/io/crypto/TestManagedKeyData.java | 212 ++++++
.../io/crypto/TestManagedKeyProvider.java | 299 +++++++++
.../main/protobuf/server/ManagedKeys.proto | 56 ++
.../apache/hadoop/hbase/HBaseServerBase.java | 33 +
.../hbase/MockRegionServerServices.java | 15 +
.../java/org/apache/hadoop/hbase/Server.java | 20 +-
.../hbase/keymeta/KeyManagementBase.java | 163 +++++
.../hbase/keymeta/KeymetaAdminImpl.java | 71 +++
.../hbase/keymeta/KeymetaMasterService.java | 60 ++
.../hbase/keymeta/KeymetaServiceEndpoint.java | 182 ++++++
.../hbase/keymeta/KeymetaTableAccessor.java | 321 ++++++++++
.../hbase/keymeta/ManagedKeyDataCache.java | 273 ++++++++
.../hbase/keymeta/SystemKeyAccessor.java | 141 ++++
.../hadoop/hbase/keymeta/SystemKeyCache.java | 76 +++
.../apache/hadoop/hbase/master/HMaster.java | 10 +
.../hadoop/hbase/master/MasterFileSystem.java | 4 +
.../hadoop/hbase/master/SplitWALManager.java | 1 -
.../hadoop/hbase/master/SystemKeyManager.java | 123 ++++
.../hbase/regionserver/HRegionServer.java | 5 +-
.../regionserver/ReplicationSyncUp.java | 15 +
.../hadoop/hbase/security/SecurityUtil.java | 12 +
.../hbase/keymeta/DummyKeyProvider.java | 38 ++
.../ManagedKeyProviderInterceptor.java | 92 +++
.../hbase/keymeta/ManagedKeyTestBase.java | 48 ++
.../hbase/keymeta/TestKeyManagementBase.java | 66 ++
.../hbase/keymeta/TestKeymetaEndpoint.java | 333 ++++++++++
.../keymeta/TestKeymetaMasterService.java | 200 ++++++
.../keymeta/TestKeymetaTableAccessor.java | 437 +++++++++++++
.../keymeta/TestManagedKeyDataCache.java | 601 ++++++++++++++++++
.../hbase/keymeta/TestManagedKeymeta.java | 150 +++++
.../hbase/keymeta/TestSystemKeyCache.java | 310 +++++++++
.../hbase/master/MockNoopMasterServices.java | 15 +
.../hadoop/hbase/master/MockRegionServer.java | 15 +
.../hbase/master/TestActiveMasterManager.java | 15 +
.../hbase/master/TestKeymetaAdminImpl.java | 278 ++++++++
.../TestSystemKeyAccessorAndManager.java | 521 +++++++++++++++
.../hbase/master/TestSystemKeyManager.java | 119 ++++
.../cleaner/TestReplicationHFileCleaner.java | 15 +
...onProcedureStorePerformanceEvaluation.java | 15 +
.../regionserver/TestHeapMemoryManager.java | 15 +
.../token/TestTokenAuthentication.java | 15 +
.../apache/hadoop/hbase/util/MockServer.java | 15 +
hbase-shell/src/main/ruby/hbase/hbase.rb | 4 +
.../src/main/ruby/hbase/keymeta_admin.rb | 56 ++
hbase-shell/src/main/ruby/hbase_constants.rb | 1 +
hbase-shell/src/main/ruby/shell.rb | 16 +
hbase-shell/src/main/ruby/shell/commands.rb | 4 +
.../shell/commands/enable_key_management.rb | 45 ++
.../shell/commands/keymeta_command_base.rb | 45 ++
.../ruby/shell/commands/show_key_status.rb | 45 ++
.../hadoop/hbase/HBaseTestingUtility.java | 7 +-
67 files changed, 6721 insertions(+), 46 deletions(-)
create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java
create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java
create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
create mode 100644 hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
create mode 100644 hbase-shell/src/main/ruby/hbase/keymeta_admin.rb
create mode 100644 hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb
create mode 100644 hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb
create mode 100644 hbase-shell/src/main/ruby/shell/commands/show_key_status.rb
diff --git a/.gitignore b/.gitignore
index 1a67643fdd17..fc93b1447ba1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,5 +25,10 @@ linklint/
**/*.log
tmp
**/.flattened-pom.xml
+.*.sw*
+ID
+filenametags
+tags
+.codegenie/
.vscode/
**/__pycache__
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
new file mode 100644
index 000000000000..8092dee1fc37
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
+@InterfaceAudience.Public
+public class KeymetaAdminClient implements KeymetaAdmin {
+ private static final Logger LOG = LoggerFactory.getLogger(KeymetaAdminClient.class);
+ private ManagedKeysProtos.ManagedKeysService.BlockingInterface stub;
+
+ public KeymetaAdminClient(Connection conn) throws IOException {
+ this.stub = ManagedKeysProtos.ManagedKeysService.newBlockingStub(
+ conn.getAdmin().coprocessorService());
+ }
+
+ @Override
+ public List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
+ throws IOException {
+ try {
+ ManagedKeysProtos.GetManagedKeysResponse response = stub.enableKeyManagement(null,
+ ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build());
+ return generateKeyDataList(response);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ @Override
+ public List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ try {
+ ManagedKeysProtos.GetManagedKeysResponse statusResponse = stub.getManagedKeys(null,
+ ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build());
+ return generateKeyDataList(statusResponse);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ private static List<ManagedKeyData> generateKeyDataList(
+ ManagedKeysProtos.GetManagedKeysResponse stateResponse) {
+ List<ManagedKeyData> keyStates = new ArrayList<>();
+ for (ManagedKeysResponse state: stateResponse.getStateList()) {
+ keyStates.add(new ManagedKeyData(
+ state.getKeyCustBytes().toByteArray(),
+ state.getKeyNamespace(), null,
+ ManagedKeyState.forValue((byte) state.getKeyState().getNumber()),
+ state.getKeyMetadata(),
+ state.getRefreshTimestamp()));
+ }
+ return keyStates;
+ }
+}
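
A minimal client-side sketch of the new admin API; the open Connection conn, the custodian bytes and the namespace "ns1" are assumptions:

    KeymetaAdmin admin = new KeymetaAdminClient(conn);
    String cust = ManagedKeyProvider.encodeToStr("cust1".getBytes());
    // Enable management for the custodian/namespace pair and print the key states.
    for (ManagedKeyData keyData : admin.enableKeyManagement(cust, "ns1")) {
      System.out.println(keyData.getKeyState() + " " + keyData.getKeyMetadata());
    }
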
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 04fc5201cc10..91630215e75d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -80,6 +80,21 @@ public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm)
* @return the encrypted key bytes
*/
public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException {
+ return wrapKey(conf, subject, key, null);
+ }
+
+ /**
+ * Protect a key by encrypting it with the secret key of the given subject or kek. The
+ * configuration must be set up correctly for key alias resolution. Only one of the
+ * {@code subject} or {@code kek} needs to be specified and the other one can be {@code null}.
+ * @param conf configuration
+ * @param subject subject key alias
+ * @param key the key
+ * @param kek the key encryption key
+ * @return the encrypted key bytes
+ */
+ public static byte[] wrapKey(Configuration conf, String subject, Key key, Key kek)
+ throws IOException {
// Wrap the key with the configured encryption algorithm.
String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Cipher cipher = Encryption.getCipher(conf, algorithm);
@@ -100,8 +115,13 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws
builder
.setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes)));
ByteArrayOutputStream out = new ByteArrayOutputStream();
- Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher,
- iv);
+ if (kek != null) {
+ Encryption.encryptWithGivenKey(kek, out, new ByteArrayInputStream(keyBytes), cipher, iv);
+ } else {
+ Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf,
+ cipher, iv);
+ }
builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray()));
// Build and return the protobuf message
out.reset();
@@ -118,6 +138,21 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws
* @return the raw key bytes
*/
public static Key unwrapKey(Configuration conf, String subject, byte[] value)
+ throws IOException, KeyException {
+ return unwrapKey(conf, subject, value, null);
+ }
+
+ /**
+ * Unwrap a key by decrypting it with the secret key of the given subject. The configuration must
+ * be set up correctly for key alias resolution. Only one of the {@code subject} or {@code kek}
+ * needs to be specified and the other one can be {@code null}.
+ * @param conf configuration
+ * @param subject subject key alias
+ * @param value the encrypted key bytes
+ * @param kek the key encryption key
+ * @return the raw key bytes
+ */
+ public static Key unwrapKey(Configuration conf, String subject, byte[] value, Key kek)
throws IOException, KeyException {
EncryptionProtos.WrappedKey wrappedKey =
EncryptionProtos.WrappedKey.parser().parseDelimitedFrom(new ByteArrayInputStream(value));
@@ -126,11 +161,12 @@ public static Key unwrapKey(Configuration conf, String subject, byte[] value)
if (cipher == null) {
throw new RuntimeException("Cipher '" + algorithm + "' not available");
}
- return getUnwrapKey(conf, subject, wrappedKey, cipher);
+ return getUnwrapKey(conf, subject, wrappedKey, cipher, kek);
}
private static Key getUnwrapKey(Configuration conf, String subject,
- EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException {
+ EncryptionProtos.WrappedKey wrappedKey, Cipher cipher, Key kek)
+ throws IOException, KeyException {
String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf);
String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim();
if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) {
@@ -143,8 +179,14 @@ private static Key getUnwrapKey(Configuration conf, String subject,
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null;
- Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(),
- subject, conf, cipher, iv);
+ if (kek != null) {
+ Encryption.decryptWithGivenKey(kek, out, wrappedKey.getData().newInput(),
+ wrappedKey.getLength(), cipher, iv);
+ } else {
+ Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(),
+ subject, conf, cipher, iv);
+ }
byte[] keyBytes = out.toByteArray();
if (wrappedKey.hasHash()) {
if (
@@ -176,7 +218,7 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value)
if (cipher == null) {
throw new RuntimeException("Cipher '" + algorithm + "' not available");
}
- return getUnwrapKey(conf, subject, wrappedKey, cipher);
+ return getUnwrapKey(conf, subject, wrappedKey, cipher, null);
}
/**
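
A round-trip sketch of the new KEK overloads, assuming a configured Configuration conf and an AES Key kek; with a non-null kek the subject is ignored, so null is acceptable:

    Key dek = new SecretKeySpec(new byte[32], "AES");
    byte[] wrapped = EncryptionUtil.wrapKey(conf, null, dek, kek);
    // Unwrap with the same KEK; the subject branch is never taken here.
    Key unwrapped = EncryptionUtil.unwrapKey(conf, null, wrapped, kek);
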
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 29def9978184..40e371edca20 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1193,6 +1193,11 @@ public enum OperationStatusCode {
/** Temporary directory used for table creation and deletion */
public static final String HBASE_TEMP_DIRECTORY = ".tmp";
+ /**
+ * Directory used for storing master keys for the cluster
+ */
+ public static final String SYSTEM_KEYS_DIRECTORY = ".system_keys";
+ public static final String SYSTEM_KEY_FILE_PREFIX = "system_key.";
/**
* The period (in milliseconds) between computing region server point in time metrics
*/
@@ -1305,6 +1310,34 @@ public enum OperationStatusCode {
/** Configuration key for enabling WAL encryption, a boolean */
public static final String ENABLE_WAL_ENCRYPTION = "hbase.regionserver.wal.encryption";
+ /** Property used by ManagedKeyStoreKeyProvider class to set the alias that identifies
+ * the current system key. */
+ public static final String CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY =
+ "hbase.crypto.managed_key_store.system.key.name";
+ public static final String CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX =
+ "hbase.crypto.managed_key_store.cust.";
+
+ /** Enables or disables the key management feature. */
+ public static final String CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY =
+ "hbase.crypto.managed_keys.enabled";
+ public static final boolean CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED = false;
+
+ /** Enables or disables key lookup during data path as an alternative to static injection of keys
+ * using control path. */
+ public static final String CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY =
+ "hbase.crypto.managed_keys.dynamic_lookup.enabled";
+ public static final boolean CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_DEFAULT_ENABLED = true;
+
+ /** Maximum number of entries in the managed key data cache. */
+ public static final String CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY =
+ "hbase.crypto.managed_keys.l1_cache.max_entries";
+ public static final int CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT = 1000;
+
+ /** Maximum number of entries in the managed key active keys cache. */
+ public static final String CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY =
+ "hbase.crypto.managed_keys.l1_active_cache.max_ns_entries";
+ public static final int CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT = 100;
+
/** Configuration key for setting RPC codec class name */
public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec";
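
A sketch of enabling the feature via these new keys (the values are illustrative; unset cache sizes fall back to the *_DEFAULT constants):

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
    conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
    conf.setInt(HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY, 500);
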
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 13e335b82ee3..336c440c4493 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -33,8 +33,10 @@
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.util.Bytes;
@@ -468,6 +470,19 @@ public static void encryptWithSubjectKey(OutputStream out, InputStream in, Strin
if (key == null) {
throw new IOException("No key found for subject '" + subject + "'");
}
+ encryptWithGivenKey(key, out, in, cipher, iv);
+ }
+
+ /**
+ * Encrypts a block of plaintext with the specified symmetric key.
+ * @param key The symmetric key
+ * @param out ciphertext
+ * @param in plaintext
+ * @param cipher the encryption algorithm
+ * @param iv the initialization vector, can be null
+ */
+ public static void encryptWithGivenKey(Key key, OutputStream out, InputStream in,
+ Cipher cipher, byte[] iv) throws IOException {
Encryptor e = cipher.getEncryptor();
e.setKey(key);
e.setIv(iv); // can be null
@@ -490,19 +505,16 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o
if (key == null) {
throw new IOException("No key found for subject '" + subject + "'");
}
- Decryptor d = cipher.getDecryptor();
- d.setKey(key);
- d.setIv(iv); // can be null
try {
- decrypt(out, in, outLen, d);
+ decryptWithGivenKey(key, out, in, outLen, cipher, iv);
} catch (IOException e) {
// If the current cipher algorithm fails to unwrap, try the alternate cipher algorithm, if one
// is configured
String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY);
if (alternateAlgorithm != null) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Unable to decrypt data with current cipher algorithm '"
- + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
+ LOG.debug("Unable to decrypt data with current cipher algorithm '" + conf.get(
+ HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
+ "'. Trying with the alternate cipher algorithm '" + alternateAlgorithm
+ "' configured.");
}
@@ -510,16 +522,22 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o
if (alterCipher == null) {
throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available");
}
- d = alterCipher.getDecryptor();
- d.setKey(key);
- d.setIv(iv); // can be null
- decrypt(out, in, outLen, d);
- } else {
- throw new IOException(e);
+ decryptWithGivenKey(key, out, in, outLen, alterCipher, iv);
+ } else {
+ throw e;
}
}
}
+ public static void decryptWithGivenKey(Key key, OutputStream out, InputStream in, int outLen,
+ Cipher cipher, byte[] iv) throws IOException {
+ Decryptor d = cipher.getDecryptor();
+ d.setKey(key);
+ d.setIv(iv); // can be null
+ decrypt(out, in, outLen, d);
+ }
+
private static ClassLoader getClassLoaderForClass(Class<?> c) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
@@ -561,6 +579,9 @@ public static KeyProvider getKeyProvider(Configuration conf) {
provider = (KeyProvider) ReflectionUtils
.newInstance(getClassLoaderForClass(KeyProvider.class).loadClass(providerClassName), conf);
provider.init(providerParameters);
+ if (provider instanceof ManagedKeyProvider) {
+ ((ManagedKeyProvider) provider).initConfig(conf);
+ }
if (LOG.isDebugEnabled()) {
LOG.debug("Installed " + providerClassName + " into key provider cache");
}
@@ -571,6 +592,11 @@ public static KeyProvider getKeyProvider(Configuration conf) {
}
}
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+ public static void clearKeyProviderCache() {
+ keyProviderCache.clear();
+ }
+
public static void incrementIv(byte[] iv) {
incrementIv(iv, 1);
}
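
A pairing sketch for the new key-based helpers; conf, key and plaintext are assumptions:

    Cipher cipher = Encryption.getCipher(conf, HConstants.CIPHER_AES);
    byte[] iv = new byte[cipher.getIvLength()]; // all-zero IV, for illustration only
    ByteArrayOutputStream ct = new ByteArrayOutputStream();
    Encryption.encryptWithGivenKey(key, ct, new ByteArrayInputStream(plaintext), cipher, iv);
    ByteArrayOutputStream pt = new ByteArrayOutputStream();
    // The outLen argument is the expected plaintext length.
    Encryption.decryptWithGivenKey(key, pt, new ByteArrayInputStream(ct.toByteArray()),
        plaintext.length, cipher, iv);
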
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
index 604bede13b20..c401d3b3f6b9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
@@ -76,6 +76,8 @@
@InterfaceAudience.Public
public class KeyStoreKeyProvider implements KeyProvider {
+ private static final char[] NO_PASSWORD = new char[0];
+
protected KeyStore store;
protected char[] password; // can be null if no password
protected Properties passwordFile; // can be null if no file provided
@@ -172,9 +174,15 @@ protected char[] getAliasPassword(String alias) {
@Override
public Key getKey(String alias) {
+ // First try with no password, as it is more common to have a password only for the store.
try {
- return store.getKey(alias, getAliasPassword(alias));
+ return store.getKey(alias, NO_PASSWORD);
} catch (UnrecoverableKeyException e) {
+ try {
+ return store.getKey(alias, getAliasPassword(alias));
+ } catch (UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException e2) {
+ // Ignore.
+ }
throw new RuntimeException(e);
} catch (KeyStoreException e) {
throw new RuntimeException(e);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
new file mode 100644
index 000000000000..ca8d55f97faa
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
@@ -0,0 +1,289 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import java.security.Key;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
+import java.util.Base64;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+/**
+ * This class represents managed encryption key data, which includes the key itself, its state,
+ * metadata and a prefix. The metadata encodes enough information on the key such that it can be
+ * used to retrieve the exact same key again in the future. If the key state is
+ * {@link ManagedKeyState#FAILED}, expect the key to be {@code null}.
+ * <p>
+ * The key data is represented by the following fields:
+ * <ul>
+ * <li>key_cust: The prefix to which this key belongs</li>
+ * <li>theKey: The key capturing the bytes and encoding</li>
+ * <li>keyState: The state of the key (see {@link ManagedKeyState})</li>
+ * <li>keyMetadata: Metadata that identifies the key</li>
+ * </ul>
+ * <p>
+ * The class provides accessor methods for these fields, as well as a method to compute a checksum
+ * for the key data. The checksum is used to ensure the integrity of the key data.
+ * <p>
+ * The class also provides a method to generate an MD5 hash of the key metadata, which can be used
+ * for validation and identification.
+ */
+@InterfaceAudience.Public
+public class ManagedKeyData {
+ /**
+ * Special value to be used for custodian or namespace to indicate that it is global, meaning it
+ * is not associated with a specific custodian or namespace.
+ */
+ public static final String KEY_SPACE_GLOBAL = "*";
+
+ /**
+ * Encoded form of global custodian.
+ */
+ public static final String KEY_GLOBAL_CUSTODIAN =
+ ManagedKeyProvider.encodeToStr(KEY_SPACE_GLOBAL.getBytes());
+
+ private final byte[] keyCustodian;
+ private final String keyNamespace;
+ private final Key theKey;
+ private final ManagedKeyState keyState;
+ private final String keyMetadata;
+ private final long refreshTimestamp;
+ private volatile long keyChecksum = 0;
+ private byte[] keyMetadataHash;
+
+ /**
+ * Constructs a new instance with the given parameters.
+ *
+ * @param key_cust The key custodian.
+ * @param key_namespace The key namespace.
+ * @param theKey The actual key, can be {@code null}.
+ * @param keyState The state of the key.
+ * @param keyMetadata The metadata associated with the key.
+ * @throws NullPointerException if any of key_cust, keyState or keyMetadata is null.
+ */
+ public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, ManagedKeyState keyState,
+ String keyMetadata) {
+ this(key_cust, key_namespace, theKey, keyState, keyMetadata,
+ EnvironmentEdgeManager.currentTime());
+ }
+
+ /**
+ * Constructs a new instance with the given parameters including refresh timestamp.
+ *
+ * @param key_cust The key custodian.
+ * @param key_namespace The key namespace.
+ * @param theKey The actual key, can be {@code null}.
+ * @param keyState The state of the key.
+ * @param keyMetadata The metadata associated with the key.
+ * @param refreshTimestamp The refresh timestamp for the key.
+ * @throws NullPointerException if any of key_cust, keyState or keyMetadata is null.
+ */
+ public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, ManagedKeyState keyState,
+ String keyMetadata, long refreshTimestamp) {
+ Preconditions.checkNotNull(key_cust, "key_cust should not be null");
+ Preconditions.checkNotNull(key_namespace, "key_namespace should not be null");
+ Preconditions.checkNotNull(keyState, "keyState should not be null");
+ // Only check for null metadata if state is not FAILED
+ if (keyState != ManagedKeyState.FAILED) {
+ Preconditions.checkNotNull(keyMetadata, "keyMetadata should not be null");
+ }
+
+ this.keyCustodian = key_cust;
+ this.keyNamespace = key_namespace;
+ this.theKey = theKey;
+ this.keyState = keyState;
+ this.keyMetadata = keyMetadata;
+ this.refreshTimestamp = refreshTimestamp;
+ }
+
+ @InterfaceAudience.Private
+ public ManagedKeyData cloneWithoutKey() {
+ return new ManagedKeyData(keyCustodian, keyNamespace, null, keyState, keyMetadata,
+ refreshTimestamp);
+ }
+
+ /**
+ * Returns the custodian associated with the key.
+ *
+ * @return The key custodian as a byte array.
+ */
+ public byte[] getKeyCustodian() {
+ return keyCustodian;
+ }
+
+ /**
+ * Return the key Custodian in Base64 encoded form.
+ * @return the encoded key custodian
+ */
+ public String getKeyCustodianEncoded() {
+ return Base64.getEncoder().encodeToString(keyCustodian);
+ }
+
+ /**
+ * Returns the namespace associated with the key.
+ *
+ * @return The namespace as a {@code String}.
+ */
+ public String getKeyNamespace() {
+ return keyNamespace;
+ }
+
+ /**
+ * Returns the actual key.
+ *
+ * @return The key as a {@code Key} object.
+ */
+ public Key getTheKey() {
+ return theKey;
+ }
+
+ /**
+ * Returns the state of the key.
+ *
+ * @return The key state as a {@code ManagedKeyState} enum value.
+ */
+ public ManagedKeyState getKeyState() {
+ return keyState;
+ }
+
+ /**
+ * Returns the metadata associated with the key.
+ *
+ * @return The key metadata as a {@code String}.
+ */
+ public String getKeyMetadata() {
+ return keyMetadata;
+ }
+
+ /**
+ * Returns the refresh timestamp of the key.
+ *
+ * @return The refresh timestamp as a long value.
+ */
+ public long getRefreshTimestamp() {
+ return refreshTimestamp;
+ }
+
+ @Override
+ public String toString() {
+ return "ManagedKeyData{" +
+ "keyCustodian=" + Arrays.toString(keyCustodian) +
+ ", keyNamespace='" + keyNamespace + '\'' +
+ ", keyState=" + keyState +
+ ", keyMetadata='" + keyMetadata + '\'' +
+ ", refreshTimestamp=" + refreshTimestamp +
+ ", keyChecksum=" + getKeyChecksum() +
+ '}';
+ }
+
+ /**
+ * Computes the checksum of the key. If the checksum has already been computed, this method
+ * returns the previously computed value. The checksum is computed using the CRC32C algorithm.
+ *
+ * @return The checksum of the key as a long value, {@code 0} if no key is available.
+ */
+ public long getKeyChecksum() {
+ if (theKey == null) {
+ return 0;
+ }
+ if (keyChecksum == 0) {
+ keyChecksum = constructKeyChecksum(theKey.getEncoded());
+ }
+ return keyChecksum;
+ }
+
+ public static long constructKeyChecksum(byte[] data) {
+ DataChecksum dataChecksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 16);
+ dataChecksum.update(data, 0, data.length);
+ return dataChecksum.getValue();
+ }
+
+ /**
+ * Computes the hash of the key metadata. If the hash has already been computed, this method
+ * returns the previously computed value. The hash is computed using the MD5 algorithm.
+ *
+ * @return The hash of the key metadata as a byte array.
+ */
+ public byte[] getKeyMetadataHash() {
+ if (keyMetadataHash == null && keyMetadata != null) {
+ keyMetadataHash = constructMetadataHash(keyMetadata);
+ }
+ return keyMetadataHash;
+ }
+
+ /**
+ * Return the hash of key metadata in Base64 encoded form.
+ * @return the encoded hash or {@code null} if no metadata is available.
+ */
+ public String getKeyMetadataHashEncoded() {
+ byte[] hash = getKeyMetadataHash();
+ if (hash != null) {
+ return Base64.getEncoder().encodeToString(hash);
+ }
+ return null;
+ }
+
+ public static byte[] constructMetadataHash(String metadata) {
+ MessageDigest md5;
+ try {
+ md5 = MessageDigest.getInstance("MD5");
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException(e);
+ }
+ return md5.digest(metadata.getBytes());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ ManagedKeyData that = (ManagedKeyData) o;
+
+ return new EqualsBuilder()
+ .append(keyCustodian, that.keyCustodian)
+ .append(keyNamespace, that.keyNamespace)
+ .append(theKey, that.theKey)
+ .append(keyState, that.keyState)
+ .append(keyMetadata, that.keyMetadata)
+ .isEquals();
+ }
+
+ @Override
+ public int hashCode() {
+ return new HashCodeBuilder(17, 37)
+ .append(keyCustodian)
+ .append(keyNamespace)
+ .append(theKey)
+ .append(keyState)
+ .append(keyMetadata)
+ .toHashCode();
+ }
+}
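
A small usage sketch of the accessors; aesKey and the metadata string are made-up values:

    ManagedKeyData data = new ManagedKeyData("cust1".getBytes(), "ns1", aesKey,
        ManagedKeyState.ACTIVE, "{\"KeyAlias\":\"k1\"}");
    long checksum = data.getKeyChecksum();          // CRC32C over the encoded key, 0 when keyless
    String hash = data.getKeyMetadataHashEncoded(); // Base64 of MD5(keyMetadata)
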
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
new file mode 100644
index 000000000000..27cd91380d6e
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import java.io.IOException;
+import java.util.Base64;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Interface for key providers of managed keys. Defines methods for generating and managing
+ * managed keys, as well as handling key storage and retrieval.
+ *
+ * The interface extends the basic {@link KeyProvider} interface with additional
+ * methods for working with managed keys.
+ */
+@InterfaceAudience.Public
+public interface ManagedKeyProvider extends KeyProvider {
+ /**
+ * Initialize the provider with the given configuration.
+ *
+ * @param conf Hadoop configuration
+ */
+ void initConfig(Configuration conf);
+
+ /**
+ * Retrieve the system key using the given system identifier.
+ *
+ * @param systemId system identifier
+ * @return ManagedKeyData for the system key; guaranteed to be non-{@code null}
+ * @throws IOException if an error occurs while retrieving the key
+ */
+ ManagedKeyData getSystemKey(byte[] systemId) throws IOException;
+
+ /**
+ * Retrieve a managed key for the specified custodian and namespace.
+ *
+ * @param key_cust The key custodian.
+ * @param key_namespace Key namespace
+ * @return ManagedKeyData for the managed key; expected to be non-{@code null}
+ * @throws IOException if an error occurs while retrieving the key
+ */
+ ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace) throws IOException;
+
+ /**
+ * Retrieve a key identified by the key metadata. The key metadata is typically generated by the
+ * same key provider via the {@link #getSystemKey(byte[])} or
+ * {@link #getManagedKey(byte[], String)} methods. If the key couldn't be retrieved using the
+ * metadata and
+ * the wrappedKey is provided, the implementation may try to decrypt it as a fallback operation.
+ *
+ * @param keyMetaData Key metadata, must not be {@code null}.
+ * @param wrappedKey The DEK key material encrypted with the corresponding KEK, if available.
+ * @return ManagedKeyData for the key represented by the metadata and is expected to be not
+ * {@code null}
+ * @throws IOException if an error occurs while unwrapping the key
+ */
+ @NonNull ManagedKeyData unwrapKey(String keyMetaData, byte[] wrappedKey) throws IOException;
+
+ /**
+ * Decode the given key custodian which is encoded as Base64 string.
+ * @param encodedKeyCust The encoded key custodian
+ * @return the decoded key custodian
+ * @throws IOException if the encoded key custodian is not a valid Base64 string
+ */
+ static byte[] decodeToBytes(String encodedKeyCust) throws IOException {
+ byte[] key_cust;
+ try {
+ key_cust = Base64.getDecoder().decode(encodedKeyCust);
+ } catch (IllegalArgumentException e) {
+ throw new IOException("Failed to decode specified key custodian as Base64 string: "
+ + encodedKeyCust, e);
+ }
+ return key_cust;
+ }
+
+ /**
+ * Encode the given key custodian as Base64 string.
+ * @param key_cust The key custodian
+ * @return the encoded key custodian as Base64 string
+ */
+ static String encodeToStr(byte[] key_cust) {
+ return Base64.getEncoder().encodeToString(key_cust);
+ }
+
+}
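
The static helpers round-trip a custodian through Base64:

    String encoded = ManagedKeyProvider.encodeToStr("cust1".getBytes());
    byte[] decoded = ManagedKeyProvider.decodeToBytes(encoded); // == "cust1" bytes
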
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java
new file mode 100644
index 000000000000..ea64355fc56b
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Enum of managed key states, used to indicate the status of managed custodian keys.
+ */
+@InterfaceAudience.Public
+public enum ManagedKeyState {
+ /** Represents the active status of a managed key. */
+ ACTIVE((byte) 1),
+ /** Represents the inactive status of a managed key. */
+ INACTIVE((byte) 2),
+ /** Represents the retrieval failure status of a managed key. */
+ FAILED((byte) 3),
+ /** Represents the disabled status of a managed key. */
+ DISABLED((byte) 4),
+ ;
+
+ private static Map<Byte, ManagedKeyState> lookupByVal;
+
+ private final byte val;
+
+ private ManagedKeyState(byte val) {
+ this.val = val;
+ }
+
+ /**
+ * Returns the numeric value of the managed key status.
+ * @return byte value
+ */
+ public byte getVal() {
+ return val;
+ }
+
+ /**
+ * Returns the ManagedKeyState for the given numeric value.
+ * @param val The numeric value of the desired ManagedKeyState
+ * @return The ManagedKeyState corresponding to the given value
+ */
+ public static ManagedKeyState forValue(byte val) {
+ if (lookupByVal == null) {
+ Map<Byte, ManagedKeyState> tbl = new HashMap<>();
+ for (ManagedKeyState e: ManagedKeyState.values()) {
+ tbl.put(e.getVal(), e);
+ }
+ lookupByVal = tbl;
+ }
+ return lookupByVal.get(val);
+ }
+
+ /**
+ * This is used to determine if a key is usable for encryption/decryption.
+ *
+ * @param state The key state to check
+ * @return true if the key state is ACTIVE or INACTIVE, false otherwise
+ */
+ public static boolean isUsable(ManagedKeyState state) {
+ return state == ACTIVE || state == INACTIVE;
+ }
+}
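
State values round-trip through their numeric form, which is how the protobuf layer elsewhere in this patch transports them:

    ManagedKeyState state = ManagedKeyState.forValue(ManagedKeyState.INACTIVE.getVal());
    // Only ACTIVE and INACTIVE keys may be used for encryption/decryption.
    boolean usable = ManagedKeyState.isUsable(state);                      // true
    boolean disabled = ManagedKeyState.isUsable(ManagedKeyState.DISABLED); // false
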
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
new file mode 100644
index 000000000000..b9005e1b27e7
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import org.apache.hbase.thirdparty.com.google.gson.reflect.TypeToken;
+import java.io.IOException;
+import java.security.Key;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.GsonUtil;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Public
+public class ManagedKeyStoreKeyProvider extends KeyStoreKeyProvider implements ManagedKeyProvider {
+ public static final String KEY_METADATA_ALIAS = "KeyAlias";
+ public static final String KEY_METADATA_CUST = "KeyCustodian";
+
+ private static final java.lang.reflect.Type KEY_METADATA_TYPE =
+ new TypeToken<Map<String, String>>(){}.getType();
+
+ private Configuration conf;
+
+ @Override
+ public void initConfig(Configuration conf) {
+ this.conf = conf;
+ }
+
+ @Override
+ public ManagedKeyData getSystemKey(byte[] clusterId) {
+ checkConfig();
+ String systemKeyAlias = conf.get(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY,
+ null);
+ if (systemKeyAlias == null) {
+ throw new RuntimeException("No alias configured for system key");
+ }
+ Key key = getKey(systemKeyAlias);
+ if (key == null) {
+ throw new RuntimeException("Unable to find system key with alias: " + systemKeyAlias);
+ }
+ // Encode clusterId too for consistency with that of key custodian.
+ String keyMetadata = generateKeyMetadata(systemKeyAlias,
+ ManagedKeyProvider.encodeToStr(clusterId));
+ return new ManagedKeyData(clusterId, ManagedKeyData.KEY_SPACE_GLOBAL, key,
+ ManagedKeyState.ACTIVE, keyMetadata);
+ }
+
+ @Override
+ public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace) throws IOException {
+ checkConfig();
+ String encodedCust = ManagedKeyProvider.encodeToStr(key_cust);
+ String aliasConfKey =
+ HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + ".alias";
+ String keyMetadata = generateKeyMetadata(conf.get(aliasConfKey, null), encodedCust);
+ return unwrapKey(keyMetadata, null);
+ }
+
+ @Override
+ public ManagedKeyData unwrapKey(String keyMetadataStr, byte[] wrappedKey) throws IOException {
+ Map<String, String> keyMetadata = GsonUtil.getDefaultInstance().fromJson(keyMetadataStr,
+ KEY_METADATA_TYPE);
+ String encodedCust = keyMetadata.get(KEY_METADATA_CUST);
+ String activeStatusConfKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust +
+ ".active";
+ boolean isActive = conf.getBoolean(activeStatusConfKey, true);
+ byte[] key_cust = ManagedKeyProvider.decodeToBytes(encodedCust);
+ String alias = keyMetadata.get(KEY_METADATA_ALIAS);
+ Key key = alias != null ? getKey(alias) : null;
+ if (key != null) {
+ return new ManagedKeyData(key_cust, ManagedKeyData.KEY_SPACE_GLOBAL, key,
+ isActive ? ManagedKeyState.ACTIVE : ManagedKeyState.INACTIVE, keyMetadataStr);
+ }
+ return new ManagedKeyData(key_cust, ManagedKeyData.KEY_SPACE_GLOBAL, null,
+ isActive ? ManagedKeyState.FAILED : ManagedKeyState.DISABLED, keyMetadataStr);
+ }
+
+ private void checkConfig() {
+ if (conf == null) {
+ throw new IllegalStateException("initConfig is not called or config is null");
+ }
+ }
+
+ public static String generateKeyMetadata(String aliasName, String encodedCust) {
+ Map<String, String> metadata = new HashMap<>(2);
+ metadata.put(KEY_METADATA_ALIAS, aliasName);
+ metadata.put(KEY_METADATA_CUST, encodedCust);
+ return GsonUtil.getDefaultInstance().toJson(metadata, HashMap.class);
+ }
+}
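
The metadata emitted by generateKeyMetadata is a small JSON map; a sketch with illustrative values:

    // Produces e.g. {"KeyAlias":"cust1_alias","KeyCustodian":"Y3VzdDE="}
    String metadata = ManagedKeyStoreKeyProvider.generateKeyMetadata("cust1_alias",
        ManagedKeyProvider.encodeToStr("cust1".getBytes()));
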
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
new file mode 100644
index 000000000000..2e52dccc0598
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * KeymetaAdmin is an interface for administrative functions related to managed keys.
+ * It provides operations to enable key management and to query managed key status.
+ */
+@InterfaceAudience.Public
+public interface KeymetaAdmin {
+ /**
+ * Enables key management for the specified custodian and namespace.
+ *
+ * @param keyCust The key custodian in base64 encoded format.
+ * @param keyNamespace The namespace for the key management.
+ *
+ * @return The list of {@link ManagedKeyData} objects each identifying the key and its current
+ * status.
+ * @throws IOException if an error occurs while enabling key management.
+ */
+ List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
+ throws IOException, KeyException;
+
+ /**
+ * Get the status of all the keys for the specified custodian.
+ *
+ * @param keyCust The key custodian in base64 encoded format.
+ * @param keyNamespace The namespace for the key management.
+ * @return The list of {@link ManagedKeyData} objects each identifying the key and its current
+ * status.
+ * @throws IOException if an error occurs while retrieving the key status.
+ */
+ List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
+ throws IOException, KeyException;
+}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index 96b3dbd4a8a5..55da4b3b12c0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -1688,16 +1688,31 @@ public static byte[] add(final byte[] a, final byte[] b) {
/**
* Concatenate byte arrays.
+ *
* @param a first third
* @param b second third
* @param c third third
* @return New array made from a, b and c
*/
public static byte[] add(final byte[] a, final byte[] b, final byte[] c) {
- byte[] result = new byte[a.length + b.length + c.length];
+ return add(a, b, c, EMPTY_BYTE_ARRAY);
+ }
+
+ /**
+ * Concatenate byte arrays.
+ *
+ * @param a first fourth
+ * @param b second fourth
+ * @param c third fourth
+ * @param d fourth fourth
+ * @return New array made from a, b, c, and d
+ */
+ public static byte[] add(final byte[] a, final byte[] b, final byte[] c, final byte[] d) {
+ byte[] result = new byte[a.length + b.length + c.length + d.length];
System.arraycopy(a, 0, result, 0, a.length);
System.arraycopy(b, 0, result, a.length, b.length);
System.arraycopy(c, 0, result, a.length + b.length, c.length);
+ System.arraycopy(d, 0, result, a.length + b.length + c.length, d.length);
return result;
}
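
The four-array overload composes the same way as the existing two- and three-array forms:

    byte[] abcd = Bytes.add(Bytes.toBytes("a"), Bytes.toBytes("b"),
        Bytes.toBytes("c"), Bytes.toBytes("d")); // "abcd"
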
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index fe6f3bc238a9..da4662d2c8a0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -297,6 +297,10 @@ public static void setRootDir(final Configuration c, final Path root) {
c.set(HConstants.HBASE_DIR, root.toString());
}
+ public static Path getSystemKeyDir(final Configuration c) throws IOException {
+ return new Path(getRootDir(c), HConstants.SYSTEM_KEYS_DIRECTORY);
+ }
+
public static void setFsDefault(final Configuration c, final Path root) {
c.set("fs.defaultFS", root.toString()); // for hadoop 0.21+
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
index e592b1f935a1..a4a8ce82b2a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
@@ -19,8 +19,10 @@
import java.io.IOException;
import java.util.concurrent.atomic.LongAdder;
+
import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hbase.thirdparty.com.google.gson.Gson;
import org.apache.hbase.thirdparty.com.google.gson.GsonBuilder;
import org.apache.hbase.thirdparty.com.google.gson.LongSerializationPolicy;
import org.apache.hbase.thirdparty.com.google.gson.TypeAdapter;
@@ -33,6 +35,8 @@
@InterfaceAudience.Private
public final class GsonUtil {
+ private static Gson INSTANCE;
+
private GsonUtil() {
}
@@ -62,4 +66,11 @@ public LongAdder read(JsonReader in) throws IOException {
public static GsonBuilder createGsonWithDisableHtmlEscaping() {
return createGson().disableHtmlEscaping();
}
+
+ public static Gson getDefaultInstance() {
+ if (INSTANCE == null) {
+ INSTANCE = createGson().create();
+ }
+ return INSTANCE;
+ }
}
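
The shared (shaded) Gson instance is used for key metadata serialization, as in ManagedKeyStoreKeyProvider above:

    Gson gson = GsonUtil.getDefaultInstance();
    String json = gson.toJson(Collections.singletonMap("KeyAlias", "k1"));
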
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
new file mode 100644
index 000000000000..a3397f96df70
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.NoSuchAlgorithmException;
+import java.util.HashMap;
+import java.util.Map;
+import javax.crypto.KeyGenerator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A simple implementation of ManagedKeyProvider for testing. It generates a key on demand given a
+ * prefix. One can control the state of a key by calling setMockedKeyState and can rotate a key
+ * by calling setMockedKey.
+ */
+public class MockManagedKeyProvider extends MockAesKeyProvider implements ManagedKeyProvider {
+ protected static final Logger LOG = LoggerFactory.getLogger(MockManagedKeyProvider.class);
+
+ private boolean multikeyGenMode;
+ private Map<String, Map<String, Key>> keys = new HashMap<>();
+ private Map<String, Map<String, ManagedKeyData>> lastGenKeyData = new HashMap<>();
+ // Keep references of all generated keys by their full and partial metadata.
+ private Map<String, Key> allGeneratedKeys = new HashMap<>();
+ private Map<String, ManagedKeyState> keyState = new HashMap<>();
+ private String systemKeyAlias = "default_system_key_alias";
+
+ @Override
+ public void initConfig(Configuration conf) {
+ // NO-OP
+ }
+
+ @Override
+ public ManagedKeyData getSystemKey(byte[] systemId) throws IOException {
+ return getKey(systemId, systemKeyAlias, ManagedKeyData.KEY_SPACE_GLOBAL);
+ }
+
+ @Override
+ public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace)
+ throws IOException {
+ String alias = Bytes.toString(key_cust);
+ return getKey(key_cust, alias, key_namespace);
+ }
+
+ @Override
+ public ManagedKeyData unwrapKey(String keyMetadata, byte[] wrappedKey) throws IOException {
+ String[] meta_toks = keyMetadata.split(":");
+ if (allGeneratedKeys.containsKey(keyMetadata)) {
+ ManagedKeyState keyState = this.keyState.get(meta_toks[1]);
+ ManagedKeyData managedKeyData =
+ new ManagedKeyData(meta_toks[0].getBytes(), meta_toks[2],
+ allGeneratedKeys.get(keyMetadata),
+ keyState == null ? ManagedKeyState.ACTIVE : keyState, keyMetadata);
+ return registerKeyData(meta_toks[1], managedKeyData);
+ }
+ return new ManagedKeyData(meta_toks[0].getBytes(), meta_toks[2],
+ null, ManagedKeyState.FAILED, keyMetadata);
+ }
+
+ public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace) {
+ if (! lastGenKeyData.containsKey(keyNamespace)) {
+ return null;
+ }
+ return lastGenKeyData.get(keyNamespace).get(alias);
+ }
+
+ private ManagedKeyData registerKeyData(String alias, ManagedKeyData managedKeyData) {
+ if (! lastGenKeyData.containsKey(managedKeyData.getKeyNamespace())) {
+ lastGenKeyData.put(managedKeyData.getKeyNamespace(), new HashMap<>());
+ }
+ lastGenKeyData.get(managedKeyData.getKeyNamespace()).put(alias,
+ managedKeyData);
+ return managedKeyData;
+ }
+
+ public void setMultikeyGenMode(boolean multikeyGenMode) {
+ this.multikeyGenMode = multikeyGenMode;
+ }
+
+ public void setMockedKeyState(String alias, ManagedKeyState status) {
+ keyState.put(alias, status);
+ }
+
+ public void setMockedKey(String alias, Key key, String keyNamespace) {
+ if (! keys.containsKey(keyNamespace)) {
+ keys.put(keyNamespace, new HashMap<>());
+ }
+ Map<String, Key> keysForSpace = keys.get(keyNamespace);
+ keysForSpace.put(alias, key);
+ }
+
+ public Key getMockedKey(String alias, String keySpace) {
+ Map<String, Key> keysForSpace = keys.get(keySpace);
+ return keysForSpace != null ? keysForSpace.get(alias) : null;
+ }
+
+ public void setClusterKeyAlias(String alias) {
+ this.systemKeyAlias = alias;
+ }
+
+ public String getSystemKeyAlias() {
+ return this.systemKeyAlias;
+ }
+
+ /**
+ * Generate a new secret key.
+ * @return the key
+ */
+ public static Key generateSecretKey() {
+ KeyGenerator keyGen = null;
+ try {
+ keyGen = KeyGenerator.getInstance("AES");
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException(e);
+ }
+ keyGen.init(256);
+ return keyGen.generateKey();
+ }
+
+ private ManagedKeyData getKey(byte[] key_cust, String alias, String key_namespace) {
+ ManagedKeyState keyState = this.keyState.get(alias);
+ if (! keys.containsKey(key_namespace)) {
+ keys.put(key_namespace, new HashMap<>());
+ }
+ Map<String, Key> keySpace = keys.get(key_namespace);
+ Key key = null;
+ if (keyState != ManagedKeyState.FAILED && keyState != ManagedKeyState.DISABLED) {
+ if (multikeyGenMode || ! keySpace.containsKey(alias)) {
+ key = generateSecretKey();
+ keySpace.put(alias, key);
+ }
+ key = keySpace.get(alias);
+ if (key == null) {
+ return null;
+ }
+ }
+ long checksum = key == null ? 0 : ManagedKeyData.constructKeyChecksum(key.getEncoded());
+ String partialMetadata = Bytes.toString(key_cust) + ":" + alias;
+ String keyMetadata = partialMetadata + ":" + key_namespace + ":" + checksum;
+ allGeneratedKeys.put(partialMetadata, key);
+ allGeneratedKeys.put(keyMetadata, key);
+ ManagedKeyData managedKeyData =
+ new ManagedKeyData(key_cust, key_namespace, key,
+ keyState == null ? ManagedKeyState.ACTIVE : keyState, keyMetadata);
+ return registerKeyData(alias, managedKeyData);
+ }
+}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
index 581681988c28..de91aa904581 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
@@ -26,6 +26,8 @@
import java.security.Key;
import java.security.KeyStore;
import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Properties;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -33,12 +35,15 @@
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.BeforeClass;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
@Category({ MiscTests.class, SmallTests.class })
+@RunWith(Parameterized.class)
public class TestKeyStoreKeyProvider {
@ClassRule
@@ -53,14 +58,33 @@ public class TestKeyStoreKeyProvider {
static File storeFile;
static File passwordFile;
- @BeforeClass
- public static void setUp() throws Exception {
+ protected KeyProvider provider;
+
+ @Parameterized.Parameter(0)
+ public boolean withPasswordOnAlias;
+ @Parameterized.Parameter(1)
+ public boolean withPasswordFile;
+
+ @Parameterized.Parameters(name = "withPasswordOnAlias={0} withPasswordFile={1}")
+ public static Collection<Object[]> parameters() {
+ return Arrays.asList(new Object[][] {
+ { Boolean.TRUE, Boolean.TRUE },
+ { Boolean.TRUE, Boolean.FALSE },
+ { Boolean.FALSE, Boolean.TRUE },
+ { Boolean.FALSE, Boolean.FALSE },
+ });
+ }
+
+ @Before
+ public void setUp() throws Exception {
KEY = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(ALIAS));
// Create a JKECS store containing a test secret key
KeyStore store = KeyStore.getInstance("JCEKS");
store.load(null, PASSWORD.toCharArray());
store.setEntry(ALIAS, new KeyStore.SecretKeyEntry(new SecretKeySpec(KEY, "AES")),
- new KeyStore.PasswordProtection(PASSWORD.toCharArray()));
+ new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
+ Properties p = new Properties();
+ addCustomEntries(store, p);
// Create the test directory
String dataDir = TEST_UTIL.getDataTestDir().toString();
new File(dataDir).mkdirs();
@@ -73,8 +97,6 @@ public static void setUp() throws Exception {
os.close();
}
// Write the password file
- Properties p = new Properties();
- p.setProperty(ALIAS, PASSWORD);
passwordFile = new File(dataDir, "keystore.pw");
os = new FileOutputStream(passwordFile);
try {
@@ -82,26 +104,27 @@ public static void setUp() throws Exception {
} finally {
os.close();
}
- }
- @Test
- public void testKeyStoreKeyProviderWithPassword() throws Exception {
- KeyProvider provider = new KeyStoreKeyProvider();
- provider.init("jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD);
- Key key = provider.getKey(ALIAS);
- assertNotNull(key);
- byte[] keyBytes = key.getEncoded();
- assertEquals(keyBytes.length, KEY.length);
- for (int i = 0; i < KEY.length; i++) {
- assertEquals(keyBytes[i], KEY[i]);
+ provider = createProvider();
+ if (withPasswordFile) {
+ provider.init("jceks://" + storeFile.toURI().getPath() + "?passwordFile="
+ + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8"));
+ } else {
+ provider.init("jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD);
}
}
+ protected KeyProvider createProvider() {
+ return new KeyStoreKeyProvider();
+ }
+
+ protected void addCustomEntries(KeyStore store, Properties passwdProps) throws Exception {
+ passwdProps.setProperty(ALIAS, PASSWORD);
+ }
+
@Test
- public void testKeyStoreKeyProviderWithPasswordFile() throws Exception {
- KeyProvider provider = new KeyStoreKeyProvider();
- provider.init("jceks://" + storeFile.toURI().getPath() + "?passwordFile="
- + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8"));
+ public void testKeyStoreKeyProvider() throws Exception {
Key key = provider.getKey(ALIAS);
assertNotNull(key);
byte[] keyBytes = key.getEncoded();
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java
new file mode 100644
index 000000000000..96b58a17b8e0
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import java.security.Key;
+import java.security.NoSuchAlgorithmException;
+import java.util.Base64;
+
+import javax.crypto.KeyGenerator;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestManagedKeyData {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagedKeyData.class);
+
+ private byte[] keyCust;
+ private String keyNamespace;
+ private Key theKey;
+ private ManagedKeyState keyState;
+ private String keyMetadata;
+ private ManagedKeyData managedKeyData;
+
+ @Before
+ public void setUp() throws NoSuchAlgorithmException {
+ keyCust = "testCustodian".getBytes();
+ keyNamespace = "testNamespace";
+ KeyGenerator keyGen = KeyGenerator.getInstance("AES");
+ keyGen.init(256);
+ theKey = keyGen.generateKey();
+ keyState = ManagedKeyState.ACTIVE;
+ keyMetadata = "testMetadata";
+ managedKeyData = new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata);
+ }
+
+ @Test
+ public void testConstructor() {
+ assertNotNull(managedKeyData);
+ assertEquals(keyNamespace, managedKeyData.getKeyNamespace());
+ assertArrayEquals(keyCust, managedKeyData.getKeyCustodian());
+ assertEquals(theKey, managedKeyData.getTheKey());
+ assertEquals(keyState, managedKeyData.getKeyState());
+ assertEquals(keyMetadata, managedKeyData.getKeyMetadata());
+ }
+
+ @Test
+ public void testConstructorNullChecks() {
+ assertThrows(NullPointerException.class,
+ () -> new ManagedKeyData(null, keyNamespace, theKey, keyState, keyMetadata));
+ assertThrows(NullPointerException.class,
+ () -> new ManagedKeyData(keyCust, null, theKey, keyState, keyMetadata));
+ assertThrows(NullPointerException.class,
+ () -> new ManagedKeyData(keyCust, keyNamespace, theKey, null, keyMetadata));
+ assertThrows(NullPointerException.class,
+ () -> new ManagedKeyData(keyCust, keyNamespace, theKey, ManagedKeyState.ACTIVE, null));
+ }
+
+ @Test
+ public void testConstructorWithFailedStateAndNullMetadata() {
+ ManagedKeyData keyData = new ManagedKeyData(keyCust, keyNamespace, null,
+ ManagedKeyState.FAILED, null);
+ assertNotNull(keyData);
+ assertEquals(ManagedKeyState.FAILED, keyData.getKeyState());
+ assertNull(keyData.getKeyMetadata());
+ assertNull(keyData.getTheKey());
+ }
+
+ @Test
+ public void testConstructorWithRefreshTimestamp() {
+ long refreshTimestamp = System.currentTimeMillis();
+ ManagedKeyData keyDataWithTimestamp = new ManagedKeyData(keyCust, keyNamespace, theKey,
+ keyState, keyMetadata, refreshTimestamp);
+ assertEquals(refreshTimestamp, keyDataWithTimestamp.getRefreshTimestamp());
+ }
+
+ @Test
+ public void testCloneWithoutKey() {
+ ManagedKeyData cloned = managedKeyData.cloneWithoutKey();
+ assertNull(cloned.getTheKey());
+ assertArrayEquals(managedKeyData.getKeyCustodian(), cloned.getKeyCustodian());
+ assertEquals(managedKeyData.getKeyNamespace(), cloned.getKeyNamespace());
+ assertEquals(managedKeyData.getKeyState(), cloned.getKeyState());
+ assertEquals(managedKeyData.getKeyMetadata(), cloned.getKeyMetadata());
+ }
+
+ @Test
+ public void testGetKeyCustodianEncoded() {
+ String encoded = managedKeyData.getKeyCustodianEncoded();
+ assertNotNull(encoded);
+ assertArrayEquals(keyCust, Base64.getDecoder().decode(encoded));
+ }
+
+ @Test
+ public void testGetKeyChecksum() {
+ long checksum = managedKeyData.getKeyChecksum();
+ assertNotEquals(0, checksum);
+
+ // Test with null key
+ ManagedKeyData nullKeyData =
+ new ManagedKeyData(keyCust, keyNamespace, null, keyState, keyMetadata);
+ assertEquals(0, nullKeyData.getKeyChecksum());
+ }
+
+ @Test
+ public void testConstructKeyChecksum() {
+ byte[] data = "testData".getBytes();
+ long checksum = ManagedKeyData.constructKeyChecksum(data);
+ assertNotEquals(0, checksum);
+ }
+
+ @Test
+ public void testGetKeyMetadataHash() {
+ byte[] hash = managedKeyData.getKeyMetadataHash();
+ assertNotNull(hash);
+ assertEquals(16, hash.length); // MD5 hash is 16 bytes long
+ }
+
+ @Test
+ public void testGetKeyMetadataHashEncoded() {
+ String encodedHash = managedKeyData.getKeyMetadataHashEncoded();
+ assertNotNull(encodedHash);
+ assertEquals(24, encodedHash.length()); // Base64 encoded MD5 hash is 24 characters long
+ }
+
+ @Test
+ public void testGetKeyMetadataHashEncodedWithNullHash() {
+ // Create ManagedKeyData with FAILED state and null metadata
+ ManagedKeyData keyData = new ManagedKeyData(
+ "custodian".getBytes(), "namespace", null, ManagedKeyState.FAILED,
+ null // null metadata should result in null hash
+ );
+
+ String encoded = keyData.getKeyMetadataHashEncoded();
+ assertNull(encoded);
+ }
+
+ @Test
+ public void testConstructMetadataHash() {
+ byte[] hash = ManagedKeyData.constructMetadataHash(keyMetadata);
+ assertNotNull(hash);
+ assertEquals(16, hash.length); // MD5 hash is 16 bytes long
+ }
+
+ @Test
+ public void testToString() {
+ String toString = managedKeyData.toString();
+ assertTrue(toString.contains("keyCustodian"));
+ assertTrue(toString.contains("keyNamespace"));
+ assertTrue(toString.contains("keyState"));
+ assertTrue(toString.contains("keyMetadata"));
+ assertTrue(toString.contains("refreshTimestamp"));
+ assertTrue(toString.contains("keyChecksum"));
+ }
+
+ @Test
+ public void testEquals() {
+ ManagedKeyData same = new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata);
+ assertEquals(managedKeyData, same);
+
+ ManagedKeyData different =
+ new ManagedKeyData("differentCust".getBytes(), keyNamespace, theKey, keyState, keyMetadata);
+ assertNotEquals(managedKeyData, different);
+ }
+
+ @Test
+ public void testHashCode() {
+ ManagedKeyData same = new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata);
+ assertEquals(managedKeyData.hashCode(), same.hashCode());
+
+ ManagedKeyData different =
+ new ManagedKeyData("differentCust".getBytes(), keyNamespace, theKey, keyState, keyMetadata);
+ assertNotEquals(managedKeyData.hashCode(), different.hashCode());
+ }
+
+ @Test
+ public void testConstants() {
+ assertEquals("*", ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertEquals(ManagedKeyProvider.encodeToStr(ManagedKeyData.KEY_SPACE_GLOBAL.getBytes()),
+ ManagedKeyData.KEY_GLOBAL_CUSTODIAN);
+ }
+}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
new file mode 100644
index 000000000000..876e14fa1101
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider.KEY_METADATA_ALIAS;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider.KEY_METADATA_CUST;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import java.security.KeyStore;
+import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.GsonUtil;
+
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Suite;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestManagedKeyProvider.TestManagedKeyStoreKeyProvider.class,
+ TestManagedKeyProvider.TestManagedKeyProviderDefault.class, })
+@Category({ MiscTests.class, SmallTests.class })
+public class TestManagedKeyProvider {
+ @RunWith(Parameterized.class)
+ @Category({ MiscTests.class, SmallTests.class })
+ public static class TestManagedKeyStoreKeyProvider extends TestKeyStoreKeyProvider {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagedKeyStoreKeyProvider.class);
+
+ private static final String SYSTEM_KEY_ALIAS = "system-alias";
+
+ private Configuration conf = HBaseConfiguration.create();
+ private int nPrefixes = 2;
+ private ManagedKeyProvider managedKeyProvider;
+ private Map<Bytes, Bytes> prefix2key = new HashMap<>();
+ private Map<Bytes, String> prefix2alias = new HashMap<>();
+ private String clusterId;
+ private byte[] systemKey;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ managedKeyProvider = (ManagedKeyProvider) provider;
+ managedKeyProvider.initConfig(conf);
+ }
+
+ @Override
+ protected KeyProvider createProvider() {
+ return new ManagedKeyStoreKeyProvider();
+ }
+
+ @Override
+ protected void addCustomEntries(KeyStore store, Properties passwdProps) throws Exception {
+ super.addCustomEntries(store, passwdProps);
+ for (int i = 0; i < nPrefixes; ++i) {
+ String prefix = "prefix+ " + i;
+ String alias = prefix + "-alias";
+ byte[] key = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(alias));
+ prefix2alias.put(new Bytes(prefix.getBytes()), alias);
+ prefix2key.put(new Bytes(prefix.getBytes()), new Bytes(key));
+ store.setEntry(alias, new KeyStore.SecretKeyEntry(new SecretKeySpec(key, "AES")),
+ new KeyStore.PasswordProtection(
+ withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
+
+ String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes());
+ String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "."
+ + "alias";
+ conf.set(confKey, alias);
+
+ passwdProps.setProperty(alias, PASSWORD);
+
+ clusterId = UUID.randomUUID().toString();
+ systemKey = MessageDigest.getInstance("SHA-256").digest(
+ Bytes.toBytes(SYSTEM_KEY_ALIAS));
+ store.setEntry(SYSTEM_KEY_ALIAS, new KeyStore.SecretKeyEntry(
+ new SecretKeySpec(systemKey, "AES")),
+ new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() :
+ new char[0]));
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
+
+ passwdProps.setProperty(SYSTEM_KEY_ALIAS, PASSWORD);
+ }
+ }
+
+ private void addEntry(String alias, String prefix) {
+ String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes());
+ String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "."
+ + "alias";
+ conf.set(confKey, alias);
+ }
+
+ @Test
+ public void testMissingConfig() throws Exception {
+ managedKeyProvider.initConfig(null);
+ RuntimeException ex = assertThrows(RuntimeException.class,
+ () -> managedKeyProvider.getSystemKey(null));
+ assertEquals("initConfig is not called or config is null", ex.getMessage());
+ }
+
+ @Test
+ public void testGetManagedKey() throws Exception {
+ for (Bytes prefix : prefix2key.keySet()) {
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(prefix.get(),
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyData(keyData, ManagedKeyState.ACTIVE, prefix2key.get(prefix).get(), prefix.get(),
+ prefix2alias.get(prefix));
+ }
+ }
+
+ @Test
+ public void testGetInactiveKey() throws Exception {
+ Bytes firstPrefix = prefix2key.keySet().iterator().next();
+ String encPrefix = Base64.getEncoder().encodeToString(firstPrefix.get());
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + ".active",
+ "false");
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(firstPrefix.get(),
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull(keyData);
+ assertKeyData(keyData, ManagedKeyState.INACTIVE, prefix2key.get(firstPrefix).get(),
+ firstPrefix.get(), prefix2alias.get(firstPrefix));
+ }
+
+ @Test
+ public void testGetInvalidKey() throws Exception {
+ byte[] invalidPrefixBytes = "invalid".getBytes();
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(invalidPrefixBytes,
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull(keyData);
+ assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefixBytes, null);
+ }
+
+ @Test
+ public void testGetDisabledKey() throws Exception {
+ byte[] invalidPrefix = new byte[] { 1, 2, 3 };
+ String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active",
+ "false");
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(invalidPrefix,
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertNotNull(keyData);
+ assertKeyData(keyData, ManagedKeyState.DISABLED, null,
+ invalidPrefix, null);
+ }
+
+ @Test
+ public void testGetSystemKey() throws Exception {
+ ManagedKeyData clusterKeyData = managedKeyProvider.getSystemKey(clusterId.getBytes());
+ assertKeyData(clusterKeyData, ManagedKeyState.ACTIVE, systemKey, clusterId.getBytes(),
+ SYSTEM_KEY_ALIAS);
+ conf.unset(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY);
+ RuntimeException ex = assertThrows(RuntimeException.class,
+ () -> managedKeyProvider.getSystemKey(null));
+ assertEquals("No alias configured for system key", ex.getMessage());
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, "non_existing_alias");
+ ex = assertThrows(RuntimeException.class,
+ () -> managedKeyProvider.getSystemKey(null));
+ assertTrue(ex.getMessage().startsWith("Unable to find system key with alias:"));
+ }
+
+ @Test
+ public void testUnwrapInvalidKey() throws Exception {
+ String invalidAlias = "invalidAlias";
+ byte[] invalidPrefix = new byte[] { 1, 2, 3 };
+ String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
+ String invalidMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias,
+ invalidPrefixEnc);
+ ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null);
+ assertNotNull(keyData);
+ assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefix,
+ invalidAlias);
+ }
+
+ @Test
+ public void testUnwrapDisabledKey() throws Exception {
+ String invalidAlias = "invalidAlias";
+ byte[] invalidPrefix = new byte[] { 1, 2, 3 };
+ String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active",
+ "false");
+ String invalidMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias,
+ invalidPrefixEnc);
+ ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null);
+ assertNotNull(keyData);
+ assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidPrefix, invalidAlias);
+ }
+
+ private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState, byte[] key,
+ byte[] prefixBytes, String alias) throws Exception {
+ assertNotNull(keyData);
+ assertEquals(expKeyState, keyData.getKeyState());
+ if (key == null) {
+ assertNull(keyData.getTheKey());
+ } else {
+ byte[] keyBytes = keyData.getTheKey().getEncoded();
+ assertEquals(key.length, keyBytes.length);
+ assertEquals(new Bytes(key), new Bytes(keyBytes));
+ }
+ Map<String, String> keyMetadata = GsonUtil.getDefaultInstance().fromJson(keyData.getKeyMetadata(),
+ HashMap.class);
+ assertNotNull(keyMetadata);
+ assertEquals(new Bytes(prefixBytes), new Bytes(keyData.getKeyCustodian()));
+ assertEquals(alias, keyMetadata.get(KEY_METADATA_ALIAS));
+ assertEquals(Base64.getEncoder().encodeToString(prefixBytes),
+ keyMetadata.get(KEY_METADATA_CUST));
+ assertEquals(keyData, managedKeyProvider.unwrapKey(keyData.getKeyMetadata(), null));
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MiscTests.class, SmallTests.class })
+ public static class TestManagedKeyProviderDefault {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagedKeyProviderDefault.class);
+
+ @Test public void testEncodeToStr() {
+ byte[] input = { 72, 101, 108, 108, 111 }; // "Hello" in ASCII
+ String expected = "SGVsbG8=";
+ String actual = ManagedKeyProvider.encodeToStr(input);
+
+ assertEquals("Encoded string should match expected Base64 representation", expected, actual);
+ }
+
+ @Test public void testDecodeToBytes() throws Exception {
+ String input = "SGVsbG8="; // "Hello" in Base64
+ byte[] expected = { 72, 101, 108, 108, 111 };
+ byte[] actual = ManagedKeyProvider.decodeToBytes(input);
+
+ assertTrue("Decoded bytes should match expected ASCII representation",
+ Arrays.equals(expected, actual));
+ }
+
+ @Test public void testEncodeToStrAndDecodeToBytes() throws Exception {
+ byte[] originalBytes = { 1, 2, 3, 4, 5 };
+ String encoded = ManagedKeyProvider.encodeToStr(originalBytes);
+ byte[] decoded = ManagedKeyProvider.decodeToBytes(encoded);
+
+ assertTrue("Decoded bytes should match original bytes",
+ Arrays.equals(originalBytes, decoded));
+ }
+
+ @Test(expected = Exception.class) public void testDecodeToBytes_InvalidInput()
+ throws Exception {
+ String invalidInput = "This is not a valid Base64 string!";
+ ManagedKeyProvider.decodeToBytes(invalidInput);
+ }
+
+ @Test public void testRoundTrip_LargeInput() throws Exception {
+ byte[] largeInput = new byte[1000];
+ for (int i = 0; i < largeInput.length; i++) {
+ largeInput[i] = (byte) (i % 256);
+ }
+
+ String encoded = ManagedKeyProvider.encodeToStr(largeInput);
+ byte[] decoded = ManagedKeyProvider.decodeToBytes(encoded);
+
+ assertTrue("Large input should survive round-trip encoding and decoding",
+ Arrays.equals(largeInput, decoded));
+ }
+ }
+}
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto b/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto
new file mode 100644
index 000000000000..c6a3a31f6183
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "ManagedKeysProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+message ManagedKeysRequest {
+ required string key_cust = 1;
+ required string key_namespace = 2;
+}
+
+enum ManagedKeyState {
+ KEY_ACTIVE = 1;
+ KEY_INACTIVE = 2;
+ KEY_FAILED = 3;
+ KEY_DISABLED = 4;
+}
+
+message ManagedKeysResponse {
+ required string key_cust = 1;
+ required string key_namespace = 2;
+ required ManagedKeyState key_state = 3;
+ optional string key_metadata = 4;
+ optional int64 refresh_timestamp = 5;
+}
+
+message GetManagedKeysResponse {
+ repeated ManagedKeysResponse state = 1;
+}
+
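+// Admin RPCs for key management metadata. Example exchange (illustrative only): a request
+// carries a Base64-encoded key_cust plus a key_namespace, and each response entry reports one
+// key's state, metadata and refresh timestamp.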
+service ManagedKeysService {
+ rpc EnableKeyManagement(ManagedKeysRequest)
+ returns (GetManagedKeysResponse);
+ rpc GetManagedKeys(ManagedKeysRequest)
+ returns (GetManagedKeysResponse);
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
index bf9640196f62..12cc7433e7be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
@@ -52,6 +52,11 @@
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdminImpl;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
@@ -60,6 +65,7 @@
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.ShutdownHook;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
@@ -187,6 +193,10 @@ public abstract class HBaseServerBase<R extends HBaseRpcServicesBase<?>> extends
protected final NettyEventLoopGroupConfig eventLoopGroupConfig;
+ private SystemKeyCache systemKeyCache;
+ protected KeymetaAdminImpl keymetaAdmin;
+ protected ManagedKeyDataCache managedKeyDataCache;
+
private void setupSignalHandlers() {
if (!SystemUtils.IS_OS_WINDOWS) {
HBasePlatformDependent.handle("HUP", (number, name) -> {
@@ -283,6 +293,8 @@ public HBaseServerBase(Configuration conf, String name) throws IOException {
initializeFileSystem();
+ keymetaAdmin = new KeymetaAdminImpl(this);
+
int choreServiceInitialSize =
conf.getInt(CHORE_SERVICE_INITIAL_POOL_SIZE, DEFAULT_CHORE_SERVICE_INITIAL_POOL_SIZE);
this.choreService = new ChoreService(getName(), choreServiceInitialSize, true);
@@ -403,6 +415,27 @@ public ZKWatcher getZooKeeper() {
return zooKeeper;
}
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
+ return keymetaAdmin;
+ }
+
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return managedKeyDataCache;
+ }
+
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
+ return systemKeyCache;
+ }
+
+ protected void buildSystemKeyCache() throws IOException {
+ if (systemKeyCache == null && SecurityUtil.isKeyManagementEnabled(conf)) {
+ systemKeyCache = SystemKeyCache.createCache(new SystemKeyAccessor(this));
+ }
+ }
+
protected final void shutdownChore(ScheduledChore chore) {
if (chore != null) {
chore.shutdown();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 32ad587ad96d..0996fbf21c52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -38,6 +38,9 @@
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
@@ -256,6 +259,18 @@ public ChoreService getChoreService() {
return null;
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public void updateRegionFavoredNodesMapping(String encodedRegionName,
List favoredNodes) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index d99807e46b06..c0ddad9109ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -23,6 +23,9 @@
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
@@ -83,6 +86,21 @@ default AsyncConnection getAsyncConnection() {
/** Returns The {@link ChoreService} instance for this server */
ChoreService getChoreService();
+ /** Returns the cache for system keys. */
+ SystemKeyCache getSystemKeyCache();
+
+ /** Returns the cache for managed keys. */
+ ManagedKeyDataCache getManagedKeyDataCache();
+
+ /** Returns the admin interface for key management metadata. */
+ KeymetaAdmin getKeymetaAdmin();
+
/** Returns Return the FileSystem object used (can return null!). */
// TODO: Distinguish between "dataFs" and "walFs".
default FileSystem getFileSystem() {
@@ -104,4 +122,4 @@ default FileSystem getFileSystem() {
default boolean isStopping() {
return false;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
new file mode 100644
index 000000000000..31c770785604
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.security.SecurityUtil;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A base class for all keymeta accessor/manager implementations.
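+ * <p>
+ * A minimal subclass sketch (hypothetical, for illustration only):
+ * <pre>
+ * public class MyKeymetaAccessor extends KeyManagementBase {
+ *   public MyKeymetaAccessor(Server server) {
+ *     super(server);
+ *   }
+ *
+ *   public void doWork() throws IOException {
+ *     assertKeyManagementEnabled(); // fail fast when the feature is off
+ *     ManagedKeyProvider provider = getKeyProvider();
+ *     // ... fetch or persist keys using the provider ...
+ *   }
+ * }
+ * </pre>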
+ */
+@InterfaceAudience.Private
+public abstract class KeyManagementBase {
+ protected static final Logger LOG = LoggerFactory.getLogger(KeyManagementBase.class);
+
+ private Server server;
+ private final Configuration configuration;
+
+ private Boolean isDynamicLookupEnabled;
+ private Boolean isKeyManagementEnabled;
+ private Integer perCustNamespaceActiveKeyCount;
+
+ /**
+ * Construct with a server instance. Configuration is derived from the server.
+ *
+ * @param server the server instance
+ */
+ public KeyManagementBase(Server server) {
+ this(server.getConfiguration());
+ this.server = server;
+ }
+
+ /**
+ * Construct with a custom configuration and no server.
+ *
+ * @param configuration the configuration instance
+ */
+ public KeyManagementBase(Configuration configuration) {
+ if (configuration == null) {
+ throw new IllegalArgumentException("Configuration must be non-null");
+ }
+ this.configuration = configuration;
+ }
+
+ protected Server getServer() {
+ return server;
+ }
+
+ protected Configuration getConfiguration() {
+ return configuration;
+ }
+
+ /**
+ * A utility method for getting the managed key provider.
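+ * The provider is resolved via {@link Encryption#getKeyProvider}, so the key provider class
+ * configured under hbase.crypto.keyprovider is expected to implement {@link ManagedKeyProvider}
+ * (for example, ManagedKeyStoreKeyProvider).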
+ * @return the key provider
+ * @throws RuntimeException if no provider is configured or if the configured provider is not an
+ * instance of ManagedKeyProvider
+ */
+ protected ManagedKeyProvider getKeyProvider() {
+ KeyProvider provider = Encryption.getKeyProvider(getConfiguration());
+ if (!(provider instanceof ManagedKeyProvider)) {
+ throw new RuntimeException("KeyProvider: " + provider.getClass().getName()
+ + " expected to be of type ManagedKeyProvider");
+ }
+ return (ManagedKeyProvider) provider;
+ }
+
+ /**
+ * A utility method for checking if dynamic lookup is enabled.
+ * @return true if dynamic lookup is enabled
+ */
+ protected boolean isDynamicLookupEnabled() {
+ if (isDynamicLookupEnabled == null) {
+ isDynamicLookupEnabled = getConfiguration().getBoolean(
+ HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_DEFAULT_ENABLED);
+ }
+ return isDynamicLookupEnabled;
+ }
+
+ /**
+ * Check if key management is enabled, otherwise throw exception.
+ * @throws IOException if key management is not enabled.
+ */
+ protected void assertKeyManagementEnabled() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ throw new IOException("Key manage is currently not enabled in HBase configuration");
+ }
+ }
+
+ protected boolean isKeyManagementEnabled() {
+ if (isKeyManagementEnabled == null) {
+ isKeyManagementEnabled = SecurityUtil.isKeyManagementEnabled(getConfiguration());
+ }
+ return isKeyManagementEnabled;
+ }
+
+ /**
+ * Utility method that retrieves a managed key from the key provider. If an existing key is
+ * provided and the retrieved key is the same as the existing key, it will be ignored.
+ *
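+ * <p>
+ * A typical call from an admin-side flow (a sketch; the names are illustrative):
+ * <pre>
+ * ManagedKeyData active = retrieveActiveKey(encCust, custBytes, namespace, accessor, null);
+ * </pre>
+ *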
+ * @param encKeyCust the encoded key custodian
+ * @param key_cust the key custodian
+ * @param keyNamespace the key namespace
+ * @param accessor the accessor to use to persist the key. If null, the key will not be persisted.
+ * @param existingActiveKey the existing key, typically the active key already retrieved from the
+ * key provider, can be null.
+ * @return the retrieved key, or null if no key could be retrieved
+ * @throws IOException if an error occurs
+ * @throws KeyException if an error occurs
+ */
+ protected ManagedKeyData retrieveActiveKey(String encKeyCust, byte[] key_cust,
+ String keyNamespace, KeymetaTableAccessor accessor, ManagedKeyData existingActiveKey)
+ throws IOException, KeyException {
+ ManagedKeyProvider provider = getKeyProvider();
+ ManagedKeyData pbeKey = provider.getManagedKey(key_cust, keyNamespace);
+ if (pbeKey == null) {
+ throw new IOException("Invalid null managed key received from key provider");
+ }
+ /* Will be useful when refresh API is implemented.
+ if (existingActiveKey != null && existingActiveKey.equals(pbeKey)) {
+ LOG.info("retrieveManagedKey: no change in key for (custodian: {}, namespace: {}",
+ encKeyCust, keyNamespace);
+ return null;
+ }
+ // TODO: If existingActiveKey is not null, we should update the key state to INACTIVE.
+ */
+ LOG.info("retrieveManagedKey: got managed key with status: {} and metadata: {} for "
+ + "(custodian: {}, namespace: {})", pbeKey.getKeyState(), pbeKey.getKeyMetadata(),
+ encKeyCust, keyNamespace);
+ if (accessor != null) {
+ accessor.addKey(pbeKey);
+ }
+ return pbeKey;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java
new file mode 100644
index 000000000000..02fb31b770e6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class KeymetaAdminImpl extends KeymetaTableAccessor implements KeymetaAdmin {
+ private static final Logger LOG = LoggerFactory.getLogger(KeymetaAdminImpl.class);
+
+ public KeymetaAdminImpl(Server server) {
+ super(server);
+ }
+
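+ /**
+ * Enables key management for the given custodian and namespace. If an active key is already
+ * recorded in the keymeta table it is returned as-is; otherwise a key is fetched from the
+ * configured provider and persisted. A minimal caller sketch (names are illustrative):
+ * <pre>
+ * List&lt;ManagedKeyData&gt; keys =
+ *   server.getKeymetaAdmin().enableKeyManagement(base64Cust, namespace);
+ * </pre>
+ */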
+ @Override
+ public List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ LOG.info("Trying to enable key management on custodian: {} under namespace: {}", keyCust,
+ keyNamespace);
+ byte[] key_cust = ManagedKeyProvider.decodeToBytes(keyCust);
+
+ // Check if (cust, namespace) pair is already enabled and has an active key.
+ ManagedKeyData activeKey = getActiveKey(key_cust, keyNamespace);
+ if (activeKey != null) {
+ LOG.info("enableManagedKeys: specified (custodian: {}, namespace: {}) already has "
+ + "an active managed key with metadata: {}", keyCust, keyNamespace,
+ activeKey.getKeyMetadata());
+ return Collections.singletonList(activeKey);
+ }
+
+ // Retrieve a single key from provider
+ ManagedKeyData retrievedKey = retrieveActiveKey(keyCust, key_cust, keyNamespace, this, null);
+ return Collections.singletonList(retrievedKey);
+ }
+
+ @Override
+ public List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ LOG.info("Getting key statuses for custodian: {} under namespace: {}", keyCust,
+ keyNamespace);
+ byte[] key_cust = ManagedKeyProvider.decodeToBytes(keyCust);
+ return getAllKeys(key_cust, keyNamespace);
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java
new file mode 100644
index 000000000000..68f78cd12dd3
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class KeymetaMasterService extends KeyManagementBase {
+ private static final Logger LOG = LoggerFactory.getLogger(KeymetaMasterService.class);
+
+ private final MasterServices master;
+
+ private static final TableDescriptorBuilder TABLE_DESCRIPTOR_BUILDER = TableDescriptorBuilder
+ .newBuilder(KeymetaTableAccessor.KEY_META_TABLE_NAME).setRegionReplication(1)
+ .setPriority(HConstants.SYSTEMTABLE_QOS)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(
+ KeymetaTableAccessor.KEY_META_INFO_FAMILY)
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setMaxVersions(1)
+ .setInMemory(true)
+ .build());
+
+ public KeymetaMasterService(MasterServices masterServices) {
+ super(masterServices);
+ master = masterServices;
+ }
+
+ public void init() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ return;
+ }
+ if (!master.getTableDescriptors().exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)) {
+ LOG.info("{} table not found. Creating.",
+ KeymetaTableAccessor.KEY_META_TABLE_NAME.getNameWithNamespaceInclAsString());
+ master.createSystemTable(TABLE_DESCRIPTOR_BUILDER.build());
+ }
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
new file mode 100644
index 000000000000..fde42b8dd295
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.Base64;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.GetManagedKeysResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysService;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+import org.apache.hbase.thirdparty.com.google.protobuf.Service;
+
+/**
+ * This class implements a coprocessor service endpoint for the key management metadata operations.
+ * It handles the following methods:
+ * <ul>
+ * <li>{@code enableKeyManagement}</li>
+ * <li>{@code getManagedKeys}</li>
+ * </ul>
+ * <p>
+ * This endpoint is designed to work in conjunction with the {@link KeymetaAdmin} interface, which
+ * provides the actual implementation of the key metadata operations.
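+ * <p>
+ * A client-side sketch (illustrative; assumes a coprocessor RPC channel to the active master):
+ * <pre>
+ * ManagedKeysService.BlockingInterface stub = ...; // from the master coprocessor channel
+ * ManagedKeysRequest req = ManagedKeysRequest.newBuilder()
+ *   .setKeyCust(base64Cust).setKeyNamespace(ns).build();
+ * GetManagedKeysResponse resp = stub.getManagedKeys(null, req);
+ * </pre>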
+ */
+@CoreCoprocessor @InterfaceAudience.Private
+public class KeymetaServiceEndpoint implements MasterCoprocessor {
+ private static final Logger LOG = LoggerFactory.getLogger(KeymetaServiceEndpoint.class);
+
+ private MasterServices master = null;
+
+ private final ManagedKeysService managedKeysService = new KeymetaAdminServiceImpl();
+
+ /**
+ * Starts the coprocessor by initializing the reference to the
+ * {@link org.apache.hadoop.hbase.master.MasterServices} instance.
+ *
+ * @param env The coprocessor environment.
+ * @throws IOException If an error occurs during initialization.
+ */
+ @Override
+ public void start(CoprocessorEnvironment env) throws IOException {
+ if (!(env instanceof HasMasterServices)) {
+ throw new IOException("Does not implement HMasterServices");
+ }
+
+ master = ((HasMasterServices) env).getMasterServices();
+ }
+
+ /**
+ * Returns an iterable of the available coprocessor services, which includes the
+ * {@link ManagedKeysService} implemented by
+ * {@link KeymetaServiceEndpoint.KeymetaAdminServiceImpl}.
+ *
+ * @return An iterable of the available coprocessor services.
+ */
+ @Override
+ public Iterable<Service> getServices() {
+ return Collections.singleton(managedKeysService);
+ }
+
+ /**
+ * The implementation of the {@link ManagedKeysProtos.ManagedKeysService}
+ * interface, which provides the actual method implementations for enabling key management.
+ */
+ @InterfaceAudience.Private
+ public class KeymetaAdminServiceImpl extends ManagedKeysService {
+
+ /**
+ * Enables key management for a given tenant and namespace, as specified in the provided
+ * request.
+ *
+ * @param controller The RPC controller.
+ * @param request The request containing the tenant and table specifications.
+ * @param done The callback to be invoked with the response.
+ */
+ @Override
+ public void enableKeyManagement(RpcController controller, ManagedKeysRequest request,
+ RpcCallback<GetManagedKeysResponse> done) {
+ ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request);
+ if (builder.getKeyCust() != null && ! builder.getKeyCust().isEmpty()) {
+ try {
+ List<ManagedKeyData> managedKeyStates = master.getKeymetaAdmin()
+ .enableKeyManagement(request.getKeyCust(), request.getKeyNamespace());
+ done.run(generateKeyStateResponse(managedKeyStates, builder));
+ } catch (IOException e) {
+ CoprocessorRpcUtils.setControllerException(controller, e);
+ } catch (KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new IOException(e));
+ }
+ }
+ }
+
+ @Override
+ public void getManagedKeys(RpcController controller, ManagedKeysRequest request,
+ RpcCallback<GetManagedKeysResponse> done) {
+ ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request);
+ if (builder.getKeyCust() != null && ! builder.getKeyCust().isEmpty()) {
+ try {
+ List<ManagedKeyData> managedKeyStates = master.getKeymetaAdmin()
+ .getManagedKeys(request.getKeyCust(), request.getKeyNamespace());
+ done.run(generateKeyStateResponse(managedKeyStates, builder));
+ } catch (IOException e) {
+ CoprocessorRpcUtils.setControllerException(controller, e);
+ } catch (KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new IOException(e));
+ }
+ }
+ }
+ }
+
+ @InterfaceAudience.Private
+ public static ManagedKeysResponse.Builder getResponseBuilder(RpcController controller,
+ ManagedKeysRequest request) {
+ ManagedKeysResponse.Builder builder = ManagedKeysResponse.newBuilder()
+ .setKeyNamespace(request.getKeyNamespace());
+ byte[] key_cust = convertToKeyCustBytes(controller, request, builder);
+ if (key_cust != null) {
+ builder.setKeyCustBytes(ByteString.copyFrom(key_cust));
+ }
+ return builder;
+ }
+
+ // Assumes that all ManagedKeyData objects belong to the same custodian and namespace.
+ @InterfaceAudience.Private
+ public static GetManagedKeysResponse generateKeyStateResponse(
+ List<ManagedKeyData> managedKeyStates, ManagedKeysResponse.Builder builder) {
+ GetManagedKeysResponse.Builder responseBuilder = GetManagedKeysResponse.newBuilder();
+ for (ManagedKeyData keyData: managedKeyStates) {
+ builder.setKeyState(ManagedKeysProtos.ManagedKeyState.valueOf(
+ keyData.getKeyState().getVal()))
+ .setKeyMetadata(keyData.getKeyMetadata())
+ .setRefreshTimestamp(keyData.getRefreshTimestamp());
+ responseBuilder.addState(builder.build());
+ }
+ return responseBuilder.build();
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] convertToKeyCustBytes(RpcController controller, ManagedKeysRequest request,
+ ManagedKeysResponse.Builder builder) {
+ byte[] key_cust = null;
+ try {
+ key_cust = Base64.getDecoder().decode(request.getKeyCust());
+ } catch (IllegalArgumentException e) {
+ builder.setKeyState(ManagedKeysProtos.ManagedKeyState.KEY_FAILED);
+ CoprocessorRpcUtils.setControllerException(controller, new IOException(
+ "Failed to decode specified prefix as Base64 string: " + request.getKeyCust(), e));
+ }
+ return key_cust;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
new file mode 100644
index 000000000000..08d92a4e1a20
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Accessor for keymeta table as part of key management.
+ */
+@InterfaceAudience.Private
+public class KeymetaTableAccessor extends KeyManagementBase {
+ private static final String KEY_META_INFO_FAMILY_STR = "info";
+
+ public static final byte[] KEY_META_INFO_FAMILY = Bytes.toBytes(KEY_META_INFO_FAMILY_STR);
+
+ public static final TableName KEY_META_TABLE_NAME =
+ TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "keymeta");
+
+ public static final String DEK_METADATA_QUAL_NAME = "m";
+ public static final byte[] DEK_METADATA_QUAL_BYTES = Bytes.toBytes(DEK_METADATA_QUAL_NAME);
+
+ public static final String DEK_CHECKSUM_QUAL_NAME = "c";
+ public static final byte[] DEK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(DEK_CHECKSUM_QUAL_NAME);
+
+ public static final String DEK_WRAPPED_BY_STK_QUAL_NAME = "w";
+ public static final byte[] DEK_WRAPPED_BY_STK_QUAL_BYTES =
+ Bytes.toBytes(DEK_WRAPPED_BY_STK_QUAL_NAME);
+
+ public static final String STK_CHECKSUM_QUAL_NAME = "s";
+ public static final byte[] STK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(STK_CHECKSUM_QUAL_NAME);
+
+ public static final String REFRESHED_TIMESTAMP_QUAL_NAME = "t";
+ public static final byte[] REFRESHED_TIMESTAMP_QUAL_BYTES =
+ Bytes.toBytes(REFRESHED_TIMESTAMP_QUAL_NAME);
+
+ public static final String KEY_STATE_QUAL_NAME = "k";
+ public static final byte[] KEY_STATE_QUAL_BYTES = Bytes.toBytes(KEY_STATE_QUAL_NAME);
+
+ public KeymetaTableAccessor(Server server) {
+ super(server);
+ }
+
+ /**
+ * Add the specified key to the keymeta table.
+ * @param keyData The key data.
+ * @throws IOException when there is an underlying IOException.
+ */
+ public void addKey(ManagedKeyData keyData) throws IOException {
+ assertKeyManagementEnabled();
+ List<Put> puts = new ArrayList<>(2);
+ if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
+ puts.add(addMutationColumns(new Put(constructRowKeyForCustNamespace(keyData)),
+ keyData));
+ }
+ final Put putForMetadata = addMutationColumns(new Put(constructRowKeyForMetadata(keyData)),
+ keyData);
+ puts.add(putForMetadata);
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ table.put(puts);
+ }
+ }
+
+ /**
+ * Get all the keys for the specified key custodian and namespace.
+ *
+ * @param key_cust The key custodian.
+ * @param keyNamespace The namespace.
+ * @return a list of key data, one entry per key; empty when none were found.
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+ public List<ManagedKeyData> getAllKeys(byte[] key_cust, String keyNamespace)
+ throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ Connection connection = getServer().getConnection();
+ // Must match the write path, so reuse constructRowKeyForCustNamespace().
+ byte[] prefixForScan = constructRowKeyForCustNamespace(key_cust, keyNamespace);
+ PrefixFilter prefixFilter = new PrefixFilter(prefixForScan);
+ Scan scan = new Scan();
+ scan.setFilter(prefixFilter);
+ scan.addFamily(KEY_META_INFO_FAMILY);
+
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME);
+ ResultScanner scanner = table.getScanner(scan)) {
+ Set<ManagedKeyData> allKeys = new HashSet<>();
+ for (Result result : scanner) {
+ ManagedKeyData keyData = parseFromResult(getServer(), key_cust, keyNamespace, result);
+ if (keyData != null) {
+ allKeys.add(keyData);
+ }
+ }
+ return new ArrayList<>(allKeys);
+ }
+ }
+
+ /**
+ * Get the active key for the specified key custodian and namespace.
+ *
+ * @param key_cust The key custodian.
+ * @param keyNamespace The namespace.
+ * @return the active key data, or null if no active key was found.
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public ManagedKeyData getActiveKey(byte[] key_cust, String keyNamespace)
+ throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ Connection connection = getServer().getConnection();
+ byte[] rowkeyForGet = constructRowKeyForCustNamespace(key_cust, keyNamespace);
+ Get get = new Get(rowkeyForGet);
+ get.addFamily(KEY_META_INFO_FAMILY); // parseFromResult() reads several columns
+
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ Result result = table.get(get);
+ return parseFromResult(getServer(), key_cust, keyNamespace, result);
+ }
+ }
+
+ /**
+ * Get the specific key identified by key_cust, keyNamespace and keyState.
+ *
+ * @param key_cust The key custodian.
+ * @param keyNamespace The namespace.
+ * @param keyState The state of the key.
+ * @return the key or {@code null}
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, ManagedKeyState keyState)
+ throws IOException, KeyException {
+ return getKeyInternal(key_cust, keyNamespace, new byte[] { keyState.getVal() });
+ }
+
+ /**
+ * Get the specific key identified by key_cust, keyNamespace and keyMetadata.
+ *
+ * @param key_cust The key custodian.
+ * @param keyNamespace The namespace.
+ * @param keyMetadata The metadata.
+ * @return the key or {@code null}
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, String keyMetadata)
+ throws IOException, KeyException {
+ return getKeyInternal(key_cust, keyNamespace,
+ ManagedKeyData.constructMetadataHash(keyMetadata));
+ }
+
+ /**
+ * Internal helper method to get a key using the provided metadata hash.
+ *
+ * @param key_cust The key custodian.
+ * @param keyNamespace The namespace.
+ * @param keyMetadataHash The metadata hash or state value.
+ * @return the key or {@code null}
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ private ManagedKeyData getKeyInternal(byte[] key_cust, String keyNamespace,
+ byte[] keyMetadataHash) throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ byte[] rowKey = constructRowKeyForMetadata(key_cust, keyNamespace, keyMetadataHash);
+ Result result = table.get(new Get(rowKey));
+ return parseFromResult(getServer(), key_cust, keyNamespace, result);
+ }
+ }
+
+ /**
+ * Add the mutation columns to the given Put that are derived from the keyData.
+ */
+ private Put addMutationColumns(Put put, ManagedKeyData keyData) throws IOException {
+ ManagedKeyData latestSystemKey = getServer().getSystemKeyCache().getLatestSystemKey();
+ if (keyData.getTheKey() != null) {
+ byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getServer().getConfiguration(), null,
+ keyData.getTheKey(), latestSystemKey.getTheKey());
+ put.addColumn(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES,
+ Bytes.toBytes(keyData.getKeyChecksum()))
+ .addColumn(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES, dekWrappedBySTK)
+ .addColumn(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES,
+ Bytes.toBytes(latestSystemKey.getKeyChecksum()));
+ }
+ Put result = put.setDurability(Durability.SKIP_WAL)
+ .setPriority(HConstants.SYSTEMTABLE_QOS)
+ .addColumn(KEY_META_INFO_FAMILY, REFRESHED_TIMESTAMP_QUAL_BYTES,
+ Bytes.toBytes(keyData.getRefreshTimestamp()))
+ .addColumn(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES,
+ new byte[] { keyData.getKeyState().getVal() });
+
+ // Only add the metadata column when metadata is present. Use an explicit charset
+ // (via Bytes.toBytes) rather than the platform-default String.getBytes().
+ String metadata = keyData.getKeyMetadata();
+ if (metadata != null) {
+ result.addColumn(KEY_META_INFO_FAMILY, DEK_METADATA_QUAL_BYTES, Bytes.toBytes(metadata));
+ }
+
+ return result;
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForMetadata(ManagedKeyData keyData) {
+ byte[] keyMetadataHash;
+ if (keyData.getKeyState() == ManagedKeyState.FAILED && keyData.getKeyMetadata() == null) {
+ // For FAILED state with null metadata, use state as metadata
+ keyMetadataHash = new byte[] { keyData.getKeyState().getVal() };
+ } else {
+ keyMetadataHash = keyData.getKeyMetadataHash();
+ }
+ return constructRowKeyForMetadata(keyData.getKeyCustodian(), keyData.getKeyNamespace(),
+ keyMetadataHash);
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForMetadata(byte[] key_cust, String keyNamespace,
+ byte[] keyMetadataHash) {
+ return Bytes.add(constructRowKeyForCustNamespace(key_cust, keyNamespace), keyMetadataHash);
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForCustNamespace(ManagedKeyData keyData) {
+ return constructRowKeyForCustNamespace(keyData.getKeyCustodian(), keyData.getKeyNamespace());
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForCustNamespace(byte[] key_cust, String keyNamespace) {
+ int custLength = key_cust.length;
+ return Bytes.add(Bytes.toBytes(custLength), key_cust, Bytes.toBytesBinary(keyNamespace));
+ }
+
+ @InterfaceAudience.Private
+ public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, String keyNamespace,
+ Result result) throws IOException, KeyException {
+ if (result == null || result.isEmpty()) {
+ return null;
+ }
+ ManagedKeyState keyState = ManagedKeyState.forValue(
+ result.getValue(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES)[0]);
+ String dekMetadata = Bytes.toString(result.getValue(KEY_META_INFO_FAMILY,
+ DEK_METADATA_QUAL_BYTES));
+ byte[] dekWrappedByStk = result.getValue(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES);
+ if ((keyState == ManagedKeyState.ACTIVE || keyState == ManagedKeyState.INACTIVE)
+ && dekWrappedByStk == null) {
+ throw new IOException(keyState + " key must have a wrapped key");
+ }
+ Key dek = null;
+ if (dekWrappedByStk != null) {
+ long stkChecksum =
+ Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES));
+ ManagedKeyData clusterKey = server.getSystemKeyCache().getSystemKeyByChecksum(stkChecksum);
+ if (clusterKey == null) {
+ LOG.error("Dropping key with metadata: {} as STK with checksum: {} is unavailable",
+ dekMetadata, stkChecksum);
+ return null;
+ }
+ dek = EncryptionUtil.unwrapKey(server.getConfiguration(), null, dekWrappedByStk,
+ clusterKey.getTheKey());
+ }
+ long refreshedTimestamp = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY,
+ REFRESHED_TIMESTAMP_QUAL_BYTES));
+ ManagedKeyData dekKeyData =
+ new ManagedKeyData(key_cust, keyNamespace, dek, keyState, dekMetadata,
+ refreshedTimestamp);
+ if (dek != null) {
+ long dekChecksum = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY,
+ DEK_CHECKSUM_QUAL_BYTES));
+ if (dekKeyData.getKeyChecksum() != dekChecksum) {
+ LOG.error("Dropping key, current key checksum: {} didn't match the expected checksum: {}"
+ + " for key with metadata: {}", dekKeyData.getKeyChecksum(), dekChecksum, dekMetadata);
+ return null;
+ }
+ }
+ return dekKeyData;
+ }
+}
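
The row-key scheme above length-prefixes the custodian so that custodians of different lengths can never produce colliding keys, and appends the metadata hash as a suffix so a prefix scan on the (custodian, namespace) pair finds all of its keys. A self-contained sketch of the same layout (illustrative class, not part of the patch; note the patch itself uses Bytes.toBytesBinary for the namespace, which additionally interprets \xNN escapes):

```java
import org.apache.hadoop.hbase.util.Bytes;

public class RowKeyLayoutSketch {
  // Mirrors constructRowKeyForCustNamespace(): a 4-byte custodian length,
  // then the custodian bytes, then the namespace bytes.
  static byte[] custNamespaceKey(byte[] cust, String namespace) {
    return Bytes.add(Bytes.toBytes(cust.length), cust, Bytes.toBytes(namespace));
  }

  // Mirrors constructRowKeyForMetadata(): the metadata hash (or the single
  // state byte, for FAILED keys without metadata) becomes a row-key suffix.
  static byte[] metadataKey(byte[] cust, String namespace, byte[] metadataHash) {
    return Bytes.add(custNamespaceKey(cust, namespace), metadataHash);
  }
}
```
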
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
new file mode 100644
index 000000000000..87c2195543c2
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.Objects;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+
+/**
+ * In-memory cache for ManagedKeyData entries, keyed by key metadata. Uses two independent
+ * Caffeine caches: one for general key data, and one that holds only the active key per
+ * (custodian, namespace) pair for efficient single-key retrieval.
+ */
+@InterfaceAudience.Private
+public class ManagedKeyDataCache extends KeyManagementBase {
+ private static final Logger LOG = LoggerFactory.getLogger(ManagedKeyDataCache.class);
+
+ private final Cache<String, ManagedKeyData> cacheByMetadata;
+ private final Cache<ActiveKeysCacheKey, ManagedKeyData> activeKeysCache;
+ private final KeymetaTableAccessor keymetaAccessor;
+
+ /**
+ * Composite key for active keys cache containing custodian and namespace.
+ * NOTE: Pair is not usable here as-is, since byte[] lacks value-based equals/hashCode.
+ */
+ @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.UNITTEST })
+ public static class ActiveKeysCacheKey {
+ private final byte[] custodian;
+ private final String namespace;
+
+ public ActiveKeysCacheKey(byte[] custodian, String namespace) {
+ this.custodian = custodian;
+ this.namespace = namespace;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null || getClass() != obj.getClass())
+ return false;
+ ActiveKeysCacheKey cacheKey = (ActiveKeysCacheKey) obj;
+ return Bytes.equals(custodian, cacheKey.custodian) &&
+ Objects.equals(namespace, cacheKey.namespace);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(Bytes.hashCode(custodian), namespace);
+ }
+ }
+
+ /**
+ * Constructs the ManagedKeyDataCache with the given configuration and keymeta accessor. When
+ * keymetaAccessor is null, L2 lookup is disabled and dynamic lookup is enabled.
+ *
+ * @param conf The configuration, can't be null.
+ * @param keymetaAccessor The keymeta accessor, can be null.
+ */
+ public ManagedKeyDataCache(Configuration conf, KeymetaTableAccessor keymetaAccessor) {
+ super(conf);
+ this.keymetaAccessor = keymetaAccessor;
+ if (keymetaAccessor == null) {
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
+ }
+
+ int maxEntries = conf.getInt(
+ HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT);
+ int activeKeysMaxEntries = conf.getInt(
+ HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT);
+ this.cacheByMetadata = Caffeine.newBuilder()
+ .maximumSize(maxEntries)
+ .build();
+ this.activeKeysCache = Caffeine.newBuilder()
+ .maximumSize(activeKeysMaxEntries)
+ .build();
+ }
+
+ /**
+ * Retrieves an entry from the cache, falling back to L2 (the keymeta table) when a
+ * KeymetaTableAccessor is available, and then to the key provider when dynamic lookup is enabled.
+ *
+ * @param key_cust the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadata the key metadata of the entry to be retrieved
+ * @param wrappedKey The DEK key material encrypted with the corresponding
+ * KEK, if available.
+ * @return the corresponding ManagedKeyData entry, or null if not found
+ * @throws IOException if an error occurs while loading from KeymetaTableAccessor
+ * @throws KeyException if an error occurs while loading from KeymetaTableAccessor
+ */
+ public ManagedKeyData getEntry(byte[] key_cust, String keyNamespace, String keyMetadata,
+ byte[] wrappedKey) throws IOException, KeyException {
+ ManagedKeyData entry = cacheByMetadata.get(keyMetadata, metadata -> {
+ // First check if it's in the active keys cache
+ ManagedKeyData keyData = getFromActiveKeysCache(key_cust, keyNamespace, keyMetadata);
+
+ // Try to load from L2
+ if (keyData == null && keymetaAccessor != null) {
+ try {
+ keyData = keymetaAccessor.getKey(key_cust, keyNamespace, metadata);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn("Failed to load key from KeymetaTableAccessor for metadata: {}", metadata, e);
+ }
+ }
+
+ // If not found in L2 and dynamic lookup is enabled, try with Key Provider
+ if (keyData == null && isDynamicLookupEnabled()) {
+ try {
+ ManagedKeyProvider provider = getKeyProvider();
+ keyData = provider.unwrapKey(metadata, wrappedKey);
+ // The provider can legitimately return null, so guard before dereferencing.
+ if (keyData != null) {
+ LOG.info("Got key data with state: {} and metadata: {} for custodian: {}",
+ keyData.getKeyState(), keyData.getKeyMetadata(),
+ ManagedKeyProvider.encodeToStr(key_cust));
+ // Add to KeymetaTableAccessor for future L2 lookups.
+ if (keymetaAccessor != null) {
+ try {
+ keymetaAccessor.addKey(keyData);
+ } catch (IOException | RuntimeException e) {
+ LOG.warn("Failed to add key to KeymetaTableAccessor for metadata: {}", metadata, e);
+ }
+ }
+ }
+ } catch (IOException | RuntimeException e) {
+ LOG.warn("Failed to load key from provider for metadata: {}", metadata, e);
+ }
+ }
+
+ if (keyData == null) {
+ keyData = new ManagedKeyData(key_cust, keyNamespace, null, ManagedKeyState.FAILED,
+ keyMetadata);
+ }
+
+ // Also update activeKeysCache if relevant and is missing.
+ if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
+ activeKeysCache.asMap().putIfAbsent(new ActiveKeysCacheKey(key_cust, keyNamespace),
+ keyData);
+ }
+
+ if (!ManagedKeyState.isUsable(keyData.getKeyState())) {
+ LOG.info("Failed to get usable key data with metadata: {} for prefix: {}",
+ metadata, ManagedKeyProvider.encodeToStr(key_cust));
+ }
+ return keyData;
+ });
+ if (ManagedKeyState.isUsable(entry.getKeyState())) {
+ return entry;
+ }
+ return null;
+ }
+
+ /**
+ * Retrieves an existing key from the active keys cache.
+ *
+ * @param key_cust the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadata the key metadata
+ * @return the ManagedKeyData if found, null otherwise
+ */
+ private ManagedKeyData getFromActiveKeysCache(byte[] key_cust, String keyNamespace,
+ String keyMetadata) {
+ ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, keyNamespace);
+ ManagedKeyData keyData = activeKeysCache.getIfPresent(cacheKey);
+ if (keyData != null && keyData.getKeyMetadata().equals(keyMetadata)) {
+ return keyData;
+ }
+ return null;
+ }
+
+ /**
+ * @return the approximate number of entries in the main cache, which serves general lookups
+ * by key metadata.
+ */
+ public int getGenericCacheEntryCount() {
+ return (int) cacheByMetadata.estimatedSize();
+ }
+
+ /**
+ * @return the approximate number of entries in the active keys cache
+ */
+ public int getActiveCacheEntryCount() {
+ return (int) activeKeysCache.estimatedSize();
+ }
+
+ /**
+ * Retrieves the active entry from the cache based on its key custodian and key namespace.
+ * This method also loads active keys from provider if not found in cache.
+ *
+ * @param key_cust The key custodian.
+ * @param keyNamespace the key namespace to search for
+ * @return the ManagedKeyData entry with the given custodian and ACTIVE status, or null if
+ * not found
+ */
+ public ManagedKeyData getActiveEntry(byte[] key_cust, String keyNamespace) {
+ ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, keyNamespace);
+
+ ManagedKeyData keyData = activeKeysCache.get(cacheKey, key -> {
+ ManagedKeyData retrievedKey = null;
+
+ // Try to load from KeymetaTableAccessor if not found in cache
+ if (keymetaAccessor != null) {
+ try {
+ retrievedKey = keymetaAccessor.getActiveKey(key_cust, keyNamespace);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn("Failed to load active key from KeymetaTableAccessor for custodian: {} "
+ + "namespace: {}", ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
+ }
+ }
+
+ // As a last-ditch effort, load the active key from the provider. This typically
+ // happens for standalone tools.
+ if (retrievedKey == null && isDynamicLookupEnabled()) {
+ try {
+ String keyCust = ManagedKeyProvider.encodeToStr(key_cust);
+ retrievedKey = retrieveActiveKey(keyCust, key_cust, keyNamespace, keymetaAccessor, null);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn("Failed to load active key from provider for custodian: {} namespace: {}",
+ ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
+ }
+ }
+
+ if (retrievedKey == null) {
+ retrievedKey = new ManagedKeyData(key_cust, keyNamespace, null, ManagedKeyState.FAILED,
+ null);
+ }
+
+ return retrievedKey;
+ });
+
+ if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
+ return keyData;
+ }
+ return null;
+ }
+
+ /**
+ * Invalidates all entries in the cache.
+ */
+ public void invalidateAll() {
+ cacheByMetadata.invalidateAll();
+ activeKeysCache.invalidateAll();
+ }
+}
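
The cache leans on Caffeine's atomic Cache.get(key, mappingFunction), so the whole L1 -> L2 -> provider fallback runs at most once per key even under concurrent callers, and caching a FAILED sentinel also remembers negative lookups. A minimal sketch of that load-through pattern (the loader is a stand-in, not the patch's logic):

```java
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class LoadThroughSketch {
  private final Cache<String, String> cache =
      Caffeine.newBuilder().maximumSize(1_000).build();

  String get(String key) {
    // The mapping function is invoked at most once per key; concurrent
    // callers for the same key block until it completes.
    return cache.get(key, this::loadFromSlowerTier);
  }

  private String loadFromSlowerTier(String key) {
    // Stand-in for the keymeta-table (L2) or provider lookup.
    return "value-for-" + key;
  }
}
```
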
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
new file mode 100644
index 000000000000..5a89d38a0bb2
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class SystemKeyAccessor extends KeyManagementBase {
+ protected final Path systemKeyDir;
+
+ public SystemKeyAccessor(Server server) throws IOException {
+ super(server);
+ this.systemKeyDir = CommonFSUtils.getSystemKeyDir(server.getConfiguration());
+ }
+
+ /**
+ * Return both the latest system key file and the full list of key files, newest first.
+ * When key management is disabled, returns a pair of nulls.
+ * @return a pair of the latest system key file and all system key files
+ * @throws IOException if listing fails; a RuntimeException if no cluster key exists yet.
+ */
+ public Pair<Path, List<Path>> getLatestSystemKeyFile() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ return new Pair<>(null, null);
+ }
+ List<Path> allClusterKeyFiles = getAllSystemKeyFiles();
+ if (allClusterKeyFiles.isEmpty()) {
+ throw new RuntimeException("No cluster key initialized yet");
+ }
+ int currentMaxSeqNum = SystemKeyAccessor.extractKeySequence(allClusterKeyFiles.get(0));
+ return new Pair<>(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + currentMaxSeqNum),
+ allClusterKeyFiles);
+ }
+
+ /**
+ * Return all available cluster key files, ordered latest to oldest.
+ * If no cluster key files are available, returns an empty list; if key management is not
+ * enabled, returns null.
+ *
+ * @return a list of all available cluster key files
+ * @throws IOException if there is an error listing the cluster key files
+ */
+ public List<Path> getAllSystemKeyFiles() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ return null;
+ }
+ FileSystem fs = getServer().getFileSystem();
+ Map<Integer, Path> clusterKeys = new TreeMap<>(Comparator.reverseOrder());
+ for (FileStatus st : fs.globStatus(new Path(systemKeyDir,
+ SYSTEM_KEY_FILE_PREFIX + "*"))) {
+ Path keyPath = st.getPath();
+ int seqNum = extractSystemKeySeqNum(keyPath);
+ clusterKeys.put(seqNum, keyPath);
+ }
+
+ return new ArrayList<>(clusterKeys.values());
+ }
+
+ public ManagedKeyData loadSystemKey(Path keyPath) throws IOException {
+ ManagedKeyProvider provider = getKeyProvider();
+ ManagedKeyData keyData = provider.unwrapKey(loadKeyMetadata(keyPath), null);
+ if (keyData == null) {
+ throw new RuntimeException("Failed to load system key from: " + keyPath);
+ }
+ return keyData;
+ }
+
+ @InterfaceAudience.Private
+ public static int extractSystemKeySeqNum(Path keyPath) throws IOException {
+ if (keyPath.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
+ try {
+ return Integer.parseInt(keyPath.getName().substring(SYSTEM_KEY_FILE_PREFIX.length()));
+ } catch (NumberFormatException e) {
+ // Fall through to the IOException below.
+ LOG.error("Invalid file name for a cluster key: {}", keyPath, e);
+ }
+ }
+ throw new IOException("Couldn't parse key file name: " + keyPath.getName());
+ }
+
+ /**
+ * Extract the key sequence number from the cluster key file name.
+ * @param clusterKeyFile the path to the cluster key file
+ * @return the sequence, or {@code -1} if the name carries no sequence suffix at all.
+ * @throws IOException if the name has a sequence suffix that can't be parsed as a number
+ */
+ @InterfaceAudience.Private
+ public static int extractKeySequence(Path clusterKeyFile) throws IOException {
+ int keySeq = -1;
+ if (clusterKeyFile.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
+ String seqStr = clusterKeyFile.getName().substring(SYSTEM_KEY_FILE_PREFIX.length());
+ if (!seqStr.isEmpty()) {
+ try {
+ keySeq = Integer.parseInt(seqStr);
+ } catch (NumberFormatException e) {
+ throw new IOException("Invalid file name for a cluster key: " + clusterKeyFile, e);
+ }
+ }
+ }
+ return keySeq;
+ }
+
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ try (FSDataInputStream fin = getServer().getFileSystem().open(keyPath)) {
+ return fin.readUTF();
+ }
+ }
+}
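
getAllSystemKeyFiles() sorts newest-first by parsing the numeric suffix of each file name into a reverse-ordered TreeMap, which is why index 0 is always the latest key. A standalone sketch of that ordering (the prefix constant here is a stand-in; the real one is HConstants.SYSTEM_KEY_FILE_PREFIX, whose value isn't shown in this hunk):

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.TreeMap;

public class KeyFileOrderingSketch {
  static final String PREFIX = "system_key_"; // illustrative stand-in

  static List<String> newestFirst(List<String> names) {
    TreeMap<Integer, String> bySeq = new TreeMap<>(Comparator.reverseOrder());
    for (String name : names) {
      if (name.startsWith(PREFIX)) {
        bySeq.put(Integer.parseInt(name.substring(PREFIX.length())), name);
      }
    }
    // Reverse-ordered keys make values() iterate latest sequence first.
    return new ArrayList<>(bySeq.values());
  }

  public static void main(String[] args) {
    // Prints [system_key_2, system_key_1, system_key_0].
    System.out.println(newestFirst(List.of("system_key_0", "system_key_2", "system_key_1")));
  }
}
```
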
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
new file mode 100644
index 000000000000..d1e3eb048a9b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings("checkstyle:FinalClass") // as otherwise it breaks mocking.
+@InterfaceAudience.Private
+public class SystemKeyCache {
+ private static final Logger LOG = LoggerFactory.getLogger(SystemKeyCache.class);
+
+ private final ManagedKeyData latestSystemKey;
+ private final Map<Long, ManagedKeyData> systemKeys;
+
+ /**
+ * Construct the System Key cache from the specified accessor.
+ * @param accessor the accessor to use to load the system keys
+ * @return the cache or {@code null} if no keys are found.
+ * @throws IOException if there is an error loading the system keys
+ */
+ public static SystemKeyCache createCache(SystemKeyAccessor accessor) throws IOException {
+ List<Path> allSystemKeyFiles = accessor.getAllSystemKeyFiles();
+ if (allSystemKeyFiles == null || allSystemKeyFiles.isEmpty()) {
+ LOG.warn("No system key files found, skipping cache creation");
+ return null;
+ }
+ ManagedKeyData latestSystemKey = null;
+ Map<Long, ManagedKeyData> systemKeys = new TreeMap<>();
+ for (Path keyPath : allSystemKeyFiles) {
+ LOG.info("Loading system key from: {}", keyPath);
+ ManagedKeyData keyData = accessor.loadSystemKey(keyPath);
+ if (latestSystemKey == null) {
+ latestSystemKey = keyData;
+ }
+ systemKeys.put(keyData.getKeyChecksum(), keyData);
+ }
+ return new SystemKeyCache(systemKeys, latestSystemKey);
+ }
+
+ private SystemKeyCache(Map systemKeys, ManagedKeyData latestSystemKey) {
+ this.systemKeys = systemKeys;
+ this.latestSystemKey = latestSystemKey;
+ }
+
+ public ManagedKeyData getLatestSystemKey() {
+ return latestSystemKey;
+ }
+
+ public ManagedKeyData getSystemKeyByChecksum(long checksum) {
+ return systemKeys.get(checksum);
+ }
+}
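
The cache is deliberately keyed by key checksum rather than by recency: each wrapped DEK row in keymeta records the checksum of the STK generation that wrapped it, so the unwrap path in KeymetaTableAccessor.parseFromResult() must find that exact generation even after later rotations. A toy sketch of that contract (values are illustrative):

```java
import java.util.HashMap;
import java.util.Map;

public class ChecksumLookupSketch {
  public static void main(String[] args) {
    Map<Long, String> byChecksum = new HashMap<>();
    byChecksum.put(111L, "stk-gen-0");
    byChecksum.put(222L, "stk-gen-1"); // the latest generation
    // A keymeta row written under generation 0 records checksum 111, so the
    // unwrap path asks for 111 even though 222 is now the latest.
    long recordedChecksum = 111L;
    System.out.println(byChecksum.get(recordedChecksum)); // stk-gen-0
  }
}
```
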
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 19f58ebe6ad0..eb89395d6c73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -122,6 +122,7 @@
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.keymeta.KeymetaMasterService;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -356,6 +357,8 @@ public class HMaster extends HBaseServerBase implements Maste
// file system manager for the master FS operations
private MasterFileSystem fileSystemManager;
private MasterWalManager walManager;
+ private SystemKeyManager systemKeyManager;
+ private KeymetaMasterService keymetaMasterService;
// manager to manage procedure-based WAL splitting, can be null if current
// is zk-based WAL splitting. SplitWALManager will replace SplitLogManager
@@ -993,6 +996,10 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
this.clusterId = clusterId.toString();
+ systemKeyManager = new SystemKeyManager(this);
+ systemKeyManager.ensureSystemKeyInitialized();
+ buildSystemKeyCache();
+
// Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
// hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
// hbase.write.hbck1.lock.file to false.
@@ -1032,6 +1039,9 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
Map, List>> procsByType = procedureExecutor
.getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));
+ keymetaMasterService = new KeymetaMasterService(this);
+ keymetaMasterService.init();
+
// Create Assignment Manager
this.assignmentManager = createAssignmentManager(this, masterRegion);
this.assignmentManager.start();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 5a43cd98feb9..0ffbfd15c41d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -66,6 +66,7 @@ public class MasterFileSystem {
private final FileSystem walFs;
// root log directory on the FS
private final Path rootdir;
+ private final Path systemKeyDir;
// hbase temp directory used for table construction and deletion
private final Path tempdir;
// root hbase directory on the FS
@@ -96,6 +97,7 @@ public MasterFileSystem(Configuration conf) throws IOException {
// default localfs. Presumption is that rootdir is fully-qualified before
// we get to here with appropriate fs scheme.
this.rootdir = CommonFSUtils.getRootDir(conf);
+ this.systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
// Cover both bases, the old way of setting default fs and the new.
// We're supposed to run on 0.20 and 0.21 anyways.
@@ -134,6 +136,7 @@ private void createInitialFileSystemLayout() throws IOException {
HConstants.CORRUPT_DIR_NAME, ReplicationUtils.REMOTE_WAL_DIR_NAME };
// check if the root directory exists
checkRootDir(this.rootdir, conf, this.fs);
+ checkSubDir(this.systemKeyDir, HBASE_DIR_PERMS);
// Check the directories under rootdir.
checkTempDir(this.tempdir, conf, this.fs);
@@ -158,6 +161,7 @@ private void createInitialFileSystemLayout() throws IOException {
if (isSecurityEnabled) {
fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
+ fs.setPermission(systemKeyDir, secureRootFilePerms);
}
FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
if (
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
index 18dfc7d493bf..99a373c8262f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
@@ -21,7 +21,6 @@
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER;
import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER;
import static org.apache.hadoop.hbase.master.MasterWalManager.NON_META_FILTER;
-
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
new file mode 100644
index 000000000000..45b021c77feb
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class SystemKeyManager extends SystemKeyAccessor {
+ private final MasterServices master;
+
+ public SystemKeyManager(MasterServices master) throws IOException {
+ super(master);
+ this.master = master;
+ }
+
+ public void ensureSystemKeyInitialized() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ return;
+ }
+ List<Path> clusterKeys = getAllSystemKeyFiles();
+ if (clusterKeys.isEmpty()) {
+ LOG.info("Initializing System Key for the first time");
+ // Double check for the cluster key, as another HMaster might have succeeded already.
+ if (rotateSystemKey(null, clusterKeys) == null &&
+ getAllSystemKeyFiles().isEmpty()) {
+ throw new RuntimeException("Failed to generate or save System Key");
+ }
+ } else if (rotateSystemKeyIfChanged() != null) {
+ LOG.info("System key has been rotated");
+ } else {
+ LOG.info("System key is already initialized and unchanged");
+ }
+ }
+
+ public ManagedKeyData rotateSystemKeyIfChanged() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ return null;
+ }
+ Pair<Path, List<Path>> latestFileResult = getLatestSystemKeyFile();
+ Path latestFile = latestFileResult.getFirst();
+ String latestKeyMetadata = loadKeyMetadata(latestFile);
+ return rotateSystemKey(latestKeyMetadata, latestFileResult.getSecond());
+ }
+
+ private ManagedKeyData rotateSystemKey(String currentKeyMetadata, List<Path> allSystemKeyFiles)
+ throws IOException {
+ ManagedKeyProvider provider = getKeyProvider();
+ ManagedKeyData clusterKey = provider.getSystemKey(
+ master.getMasterFileSystem().getClusterId().toString().getBytes());
+ if (clusterKey == null) {
+ throw new IOException("Failed to get system key for cluster id: " +
+ master.getMasterFileSystem().getClusterId().toString());
+ }
+ if (clusterKey.getKeyState() != ManagedKeyState.ACTIVE) {
+ throw new IOException("System key is expected to be ACTIVE but it is: " +
+ clusterKey.getKeyState() + " for metadata: " + clusterKey.getKeyMetadata());
+ }
+ if (clusterKey.getKeyMetadata() == null) {
+ throw new IOException("System key is expected to have metadata but it is null");
+ }
+ if (!clusterKey.getKeyMetadata().equals(currentKeyMetadata) &&
+ saveLatestSystemKey(clusterKey.getKeyMetadata(), allSystemKeyFiles)) {
+ return clusterKey;
+ }
+ return null;
+ }
+
+ private boolean saveLatestSystemKey(String keyMetadata, List<Path> allSystemKeyFiles)
+ throws IOException {
+ int nextSystemKeySeq = (allSystemKeyFiles.isEmpty() ? -1
+ : SystemKeyAccessor.extractKeySequence(allSystemKeyFiles.get(0))) + 1;
+ LOG.info("Trying to save a new cluster key at seq: {}", nextSystemKeySeq);
+ MasterFileSystem masterFS = master.getMasterFileSystem();
+ Path nextSystemKeyPath = new Path(systemKeyDir,
+ SYSTEM_KEY_FILE_PREFIX + nextSystemKeySeq);
+ Path tempSystemKeyFile = new Path(masterFS.getTempDir(),
+ nextSystemKeyPath.getName() + UUID.randomUUID());
+ try {
+ // Close the stream before renaming so the contents are fully flushed.
+ try (FSDataOutputStream fsDataOutputStream = masterFS.getFileSystem()
+ .create(tempSystemKeyFile)) {
+ fsDataOutputStream.writeUTF(keyMetadata);
+ }
+ boolean succeeded = masterFS.getFileSystem().rename(tempSystemKeyFile, nextSystemKeyPath);
+ if (succeeded) {
+ LOG.info("System key save succeeded for seq: {}", nextSystemKeySeq);
+ } else {
+ LOG.error("System key save failed for seq: {}", nextSystemKeySeq);
+ }
+ return succeeded;
+ } finally {
+ masterFS.getFileSystem().delete(tempSystemKeyFile, false);
+ }
+ }
+}
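
saveLatestSystemKey() uses the classic write-to-temp-then-rename pattern: the rename either publishes a fully written file or fails when a competing master got there first, which is what makes the "double check" in ensureSystemKeyInitialized() safe. A minimal HDFS-flavoured sketch of the same pattern (paths, helper names, and metadata are illustrative):

```java
import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AtomicPublishSketch {
  static boolean publish(FileSystem fs, Path tempDir, Path finalPath, String metadata)
      throws IOException {
    Path temp = new Path(tempDir, finalPath.getName() + "." + UUID.randomUUID());
    try {
      // Close the stream before renaming so the contents are fully flushed.
      try (FSDataOutputStream out = fs.create(temp)) {
        out.writeUTF(metadata);
      }
      // On HDFS, rename() to an existing file returns false rather than
      // overwriting, so a racing writer loses cleanly.
      return fs.rename(temp, finalPath);
    } finally {
      fs.delete(temp, false); // best-effort cleanup; a no-op after a rename
    }
  }
}
```
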
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2bddf7f9d275..b2e903a6394a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -120,6 +120,7 @@
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.mob.RSMobFileCleanerChore;
@@ -596,7 +597,6 @@ protected RegionServerCoprocessorHost getCoprocessorHost() {
return getRegionServerCoprocessorHost();
}
- @Override
protected boolean canCreateBaseZNode() {
return !clusterMode();
}
@@ -1449,6 +1449,9 @@ protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
initializeFileSystem();
}
+ buildSystemKeyCache();
+ managedKeyDataCache = new ManagedKeyDataCache(this.getConfiguration(), keymetaAdmin);
+
// hack! Maps DFSClient => RegionServer for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
if (this.conf.get("mapreduce.task.attempt.id") == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index a4ca20fa7311..65e8aa5e66e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -41,6 +41,9 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.master.replication.OfflineTableReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationGroupOffset;
@@ -366,6 +369,18 @@ public ChoreService getChoreService() {
return null;
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public FileSystem getFileSystem() {
return null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
index 5f9433a3f141..92b5f340a610 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.security;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@@ -45,4 +47,14 @@ public static String getPrincipalWithoutRealm(final String principal) {
int i = principal.indexOf("@");
return (i > -1) ? principal.substring(0, i) : principal;
}
+
+ /**
+ * From the given configuration, determine if key management is enabled.
+ * @param conf the configuration to check
+ * @return true if key management is enabled
+ */
+ public static boolean isKeyManagementEnabled(Configuration conf) {
+ return conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED);
+ }
}
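
A tiny usage sketch of the new helper; the default comes from HConstants.CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED (its value isn't shown in this hunk), and the tests in this patch opt in explicitly:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.security.SecurityUtil;

public class KeyManagementFlagSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    System.out.println(SecurityUtil.isKeyManagementEnabled(conf)); // the default
    conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
    System.out.println(SecurityUtil.isKeyManagementEnabled(conf)); // true
  }
}
```
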
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
new file mode 100644
index 000000000000..2d8ae446da3a
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.security.Key;
+
+import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+
+public class DummyKeyProvider implements KeyProvider {
+ @Override
+ public void init(String params) {
+ }
+
+ @Override
+ public Key[] getKeys(String[] aliases) {
+ return null;
+ }
+
+ @Override
+ public Key getKey(String alias) {
+ return null;
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
new file mode 100644
index 000000000000..8e428c163127
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.Key;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.mockito.Mockito;
+
+public class ManagedKeyProviderInterceptor extends MockManagedKeyProvider {
+ public final MockManagedKeyProvider delegate;
+ public final MockManagedKeyProvider spy;
+
+ public ManagedKeyProviderInterceptor() {
+ this.delegate = new MockManagedKeyProvider();
+ this.spy = Mockito.spy(delegate);
+ }
+
+ @Override
+ public void initConfig(Configuration conf) {
+ spy.initConfig(conf);
+ }
+
+ @Override
+ public ManagedKeyData getManagedKey(byte[] custodian, String namespace) throws IOException {
+ return spy.getManagedKey(custodian, namespace);
+ }
+
+ @Override
+ public ManagedKeyData getSystemKey(byte[] systemId) throws IOException {
+ return spy.getSystemKey(systemId);
+ }
+
+ @Override
+ public ManagedKeyData unwrapKey(String keyMetadata, byte[] wrappedKey) throws IOException {
+ return spy.unwrapKey(keyMetadata, wrappedKey);
+ }
+
+ @Override
+ public void init(String params) {
+ spy.init(params);
+ }
+
+ @Override
+ public Key getKey(String alias) {
+ return spy.getKey(alias);
+ }
+
+ @Override
+ public Key[] getKeys(String[] aliases) {
+ return spy.getKeys(aliases);
+ }
+
+ @Override
+ public void setMockedKeyState(String alias, ManagedKeyState state) {
+ delegate.setMockedKeyState(alias, state);
+ }
+
+ @Override
+ public void setMultikeyGenMode(boolean multikeyGenMode) {
+ delegate.setMultikeyGenMode(multikeyGenMode);
+ }
+
+ @Override
+ public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace) {
+ return delegate.getLastGeneratedKeyData(alias, keyNamespace);
+ }
+
+ @Override
+ public void setMockedKey(String alias, java.security.Key key, String keyNamespace) {
+ delegate.setMockedKey(alias, key, keyNamespace);
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
new file mode 100644
index 000000000000..a0147e6e4e2e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.junit.After;
+import org.junit.Before;
+
+public class ManagedKeyTestBase {
+ protected HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+ @Before
+ public void setUp() throws Exception {
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes",
+ KeymetaServiceEndpoint.class.getName());
+
+ // Start the minicluster
+ TEST_UTIL.startMiniCluster(1);
+ TEST_UTIL.waitFor(60000,
+ () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
+ TEST_UTIL.waitUntilAllRegionsAssigned(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
new file mode 100644
index 000000000000..ab871b241830
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeyManagementBase {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(
+ TestKeyManagementBase.class);
+
+ @Test
+ public void testGetKeyProviderWithInvalidProvider() throws Exception {
+ // Setup configuration with a non-ManagedKeyProvider
+ Configuration conf = new Configuration();
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
+ "org.apache.hadoop.hbase.keymeta.DummyKeyProvider");
+
+ Server mockServer = mock(Server.class);
+ when(mockServer.getConfiguration()).thenReturn(conf);
+
+ KeyManagementBase keyMgmt = new TestKeyManagement(mockServer);
+
+ // Should throw RuntimeException when provider is not ManagedKeyProvider
+ RuntimeException exception = assertThrows(RuntimeException.class, () -> {
+ keyMgmt.getKeyProvider();
+ });
+
+ assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider"));
+ }
+
+ private static class TestKeyManagement extends KeyManagementBase {
+ public TestKeyManagement(Server server) {
+ super(server);
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
new file mode 100644
index 000000000000..7070596a93c0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
@@ -0,0 +1,333 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyState.KEY_ACTIVE;
+import static org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyState.KEY_FAILED;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.contains;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.withSettings;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.List;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.keymeta.KeymetaServiceEndpoint.KeymetaAdminServiceImpl;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.GetManagedKeysResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaEndpoint {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaEndpoint.class);
+
+ private static final String KEY_CUST = "keyCust";
+ private static final String KEY_NAMESPACE = "keyNamespace";
+ private static final String KEY_METADATA1 = "keyMetadata1";
+ private static final String KEY_METADATA2 = "keyMetadata2";
+
+ @Mock
+ private RpcController controller;
+ @Mock
+ private MasterServices master;
+ @Mock
+ private RpcCallback<GetManagedKeysResponse> done;
+ @Mock
+ private KeymetaAdmin keymetaAdmin;
+
+ KeymetaServiceEndpoint keymetaServiceEndpoint;
+ private ManagedKeysResponse.Builder responseBuilder;
+ private ManagedKeysRequest.Builder requestBuilder;
+ private KeymetaAdminServiceImpl keyMetaAdminService;
+ private ManagedKeyData keyData1;
+ private ManagedKeyData keyData2;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.openMocks(this);
+ keymetaServiceEndpoint = new KeymetaServiceEndpoint();
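+ // Mock a CoprocessorEnvironment that also implements HasMasterServices so that
+ // start() can obtain the MasterServices reference.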
+ CoprocessorEnvironment env = mock(CoprocessorEnvironment.class,
+ withSettings().extraInterfaces(HasMasterServices.class));
+ when(((HasMasterServices) env).getMasterServices()).thenReturn(master);
+ keymetaServiceEndpoint.start(env);
+ keyMetaAdminService = (KeymetaAdminServiceImpl) keymetaServiceEndpoint.getServices()
+ .iterator().next();
+ responseBuilder = ManagedKeysResponse.newBuilder().setKeyState(KEY_ACTIVE);
+ requestBuilder = ManagedKeysRequest.newBuilder()
+ .setKeyNamespace(ManagedKeyData.KEY_SPACE_GLOBAL);
+ keyData1 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE,
+ new SecretKeySpec("key1".getBytes(), "AES"), ACTIVE, KEY_METADATA1);
+ keyData2 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE,
+ new SecretKeySpec("key2".getBytes(), "AES"), ACTIVE, KEY_METADATA2);
+ when(master.getKeymetaAdmin()).thenReturn(keymetaAdmin);
+ }
+
+ @Test
+ public void testConvertToKeyCustBytesValid() {
+ // Arrange
+ String validBase64 = Base64.getEncoder().encodeToString("testKey".getBytes());
+ ManagedKeysRequest request = requestBuilder.setKeyCust(validBase64).build();
+
+ // Act
+ byte[] result =
+ KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request, responseBuilder);
+
+ // Assert
+ assertNotNull(result);
+ assertArrayEquals("testKey".getBytes(), result);
+ assertEquals(KEY_ACTIVE, responseBuilder.getKeyState());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testConvertToKeyCustBytesInvalid() {
+ // Arrange
+ String invalidBase64 = "invalid!Base64@String";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build();
+
+ // Act
+ byte[] result = KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request,
+ responseBuilder);
+
+ // Assert
+ assertNull(result);
+ assertEquals(KEY_FAILED, responseBuilder.getKeyState());
+ verify(controller).setFailed(anyString());
+ }
+
+ @Test
+ public void testGetResponseBuilder() {
+ // Arrange
+ String keyCust = Base64.getEncoder().encodeToString("testKey".getBytes());
+ String keyNamespace = "testNamespace";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust)
+ .setKeyNamespace(keyNamespace)
+ .build();
+
+ // Act
+ ManagedKeysResponse.Builder result = KeymetaServiceEndpoint.getResponseBuilder(controller,
+ request);
+
+ // Assert
+ assertNotNull(result);
+ assertEquals(keyNamespace, result.getKeyNamespace());
+ assertArrayEquals("testKey".getBytes(), result.getKeyCustBytes().toByteArray());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testGetResponseBuilderWithInvalidBase64() {
+ // Arrange
+ String keyCust = "invalidBase64!";
+ String keyNamespace = "testNamespace";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust)
+ .setKeyNamespace(keyNamespace)
+ .build();
+
+ // Act
+ ManagedKeysResponse.Builder result = KeymetaServiceEndpoint.getResponseBuilder(controller,
+ request);
+
+ // Assert
+ assertNotNull(result);
+ assertEquals(keyNamespace, result.getKeyNamespace());
+ assertEquals(KEY_FAILED, result.getKeyState());
+ verify(controller).setFailed(contains("Failed to decode specified prefix as Base64 string"));
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse() throws Exception {
+ // Arrange
+ ManagedKeysResponse response = responseBuilder.setKeyCustBytes(ByteString.copyFrom(
+ keyData1.getKeyCustodian()))
+ .setKeyNamespace(keyData1.getKeyNamespace())
+ .build();
+ List<ManagedKeyData> managedKeyStates = Arrays.asList(keyData1, keyData2);
+
+ // Act
+ GetManagedKeysResponse result = KeymetaServiceEndpoint.generateKeyStateResponse(
+ managedKeyStates, responseBuilder);
+
+ // Assert
+ assertNotNull(result);
+ assertNotNull(result.getStateList());
+ assertEquals(2, result.getStateList().size());
+ assertEquals(KEY_ACTIVE, result.getStateList().get(0).getKeyState());
+ assertEquals(0, Bytes.compareTo(keyData1.getKeyCustodian(),
+ result.getStateList().get(0).getKeyCustBytes().toByteArray()));
+ assertEquals(keyData1.getKeyNamespace(), result.getStateList().get(0).getKeyNamespace());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_Empty() throws Exception {
+ // Arrange
+ ManagedKeysResponse response = responseBuilder.setKeyCustBytes(ByteString.copyFrom(
+ keyData1.getKeyCustodian()))
+ .setKeyNamespace(keyData1.getKeyNamespace())
+ .build();
+ List<ManagedKeyData> managedKeyStates = new ArrayList<>();
+
+ // Act
+ GetManagedKeysResponse result = KeymetaServiceEndpoint.generateKeyStateResponse(
+ managedKeyStates, responseBuilder);
+
+ // Assert
+ assertNotNull(result);
+ assertNotNull(result.getStateList());
+ assertEquals(0, result.getStateList().size());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_Success() throws Exception {
+ doTestServiceCallForSuccess(
+ (controller, request, done) ->
+ keyMetaAdminService.enableKeyManagement(controller, request, done));
+ }
+
+ @Test
+ public void testGetManagedKeys_Success() throws Exception {
+ doTestServiceCallForSuccess(
+ (controller, request, done) ->
+ keyMetaAdminService.getManagedKeys(controller, request, done));
+ }
+
+ private void doTestServiceCallForSuccess(ServiceCall svc) throws Exception {
+ // Arrange
+ ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build();
+ List<ManagedKeyData> managedKeyStates = Arrays.asList(keyData1);
+ when(keymetaAdmin.enableKeyManagement(any(), any())).thenReturn(managedKeyStates);
+
+ // Act
+ svc.call(controller, request, done);
+
+ // Assert
+ verify(done).run(any());
+ verify(controller, never()).setFailed(anyString());
+ }
+
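+ // Functional hook so the success-path arrange/act/assert steps can be shared
+ // between the enableKeyManagement and getManagedKeys service calls.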
+ private interface ServiceCall {
+ void call(RpcController controller, ManagedKeysRequest request,
+ RpcCallback<GetManagedKeysResponse> done) throws Exception;
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_InvalidCust() throws Exception {
+ // Arrange
+ String invalidBase64 = "invalid!Base64@String";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build();
+
+ // Act
+ keyMetaAdminService.enableKeyManagement(controller, request, done);
+
+ // Assert
+ verify(controller).setFailed(contains("IOException"));
+ verify(keymetaAdmin, never()).enableKeyManagement(any(), any());
+ verify(done, never()).run(any());
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_IOException() throws Exception {
+ // Arrange
+ when(keymetaAdmin.enableKeyManagement(any(), any())).thenThrow(IOException.class);
+ ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build();
+
+ // Act
+ keyMetaAdminService.enableKeyManagement(controller, request, done);
+
+ // Assert
+ verify(controller).setFailed(contains("IOException"));
+ verify(keymetaAdmin).enableKeyManagement(any(), any());
+ verify(done, never()).run(any());
+ }
+
+ @Test
+ public void testGetManagedKeys_IOException() throws Exception {
+ doTestGetManagedKeysError(IOException.class);
+ }
+
+ @Test
+ public void testGetManagedKeys_KeyException() throws Exception {
+ doTestGetManagedKeysError(KeyException.class);
+ }
+
+ private void doTestGetManagedKeysError(Class<? extends Exception> exType) throws Exception {
+ // Arrange
+ when(keymetaAdmin.getManagedKeys(any(), any())).thenThrow(exType);
+ ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build();
+
+ // Act
+ keyMetaAdminService.getManagedKeys(controller, request, done);
+
+ // Assert
+ verify(controller).setFailed(contains(exType.getSimpleName()));
+ verify(keymetaAdmin).getManagedKeys(any(), any());
+ verify(done, never()).run(any());
+ }
+
+ @Test
+ public void testGetManagedKeys_InvalidCust() throws Exception {
+ // Arrange
+ String invalidBase64 = "invalid!Base64@String";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build();
+
+ // Act
+ keyMetaAdminService.getManagedKeys(controller, request, done);
+
+ // Assert
+ verify(controller).setFailed(contains("IOException"));
+ verify(keymetaAdmin, never()).getManagedKeys(any(), any());
+ verify(done, never()).run(any());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java
new file mode 100644
index 000000000000..f34d482d7940
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+/**
+ * Tests for KeymetaMasterService class
+ */
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaMasterService {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaMasterService.class);
+
+ @Mock
+ private MasterServices mockMaster;
+ @Mock
+ private TableDescriptors mockTableDescriptors;
+
+ private Configuration conf;
+ private KeymetaMasterService service;
+ private AutoCloseable closeableMocks;
+
+ @Before
+ public void setUp() throws Exception {
+ closeableMocks = MockitoAnnotations.openMocks(this);
+
+ conf = new Configuration();
+ when(mockMaster.getConfiguration()).thenReturn(conf);
+ when(mockMaster.getTableDescriptors()).thenReturn(mockTableDescriptors);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (closeableMocks != null) {
+ closeableMocks.close();
+ }
+ }
+
+ @Test
+ public void testInitWithKeyManagementDisabled() throws Exception {
+ // Setup - disable key management
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute
+ service.init(); // Should return early without creating table
+
+ // Verify - no table operations should be performed
+ verify(mockMaster, never()).getTableDescriptors();
+ verify(mockMaster, never()).createSystemTable(any());
+ }
+
+ @Test
+ public void testInitWithKeyManagementEnabledAndTableExists() throws Exception {
+ // Setup - enable key management and table already exists
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(true);
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute
+ service.init();
+
+ // Verify - table exists check is performed but no table creation
+ verify(mockMaster).getTableDescriptors();
+ verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster, never()).createSystemTable(any());
+ }
+
+ @Test
+ public void testInitWithKeyManagementEnabledAndTableDoesNotExist() throws Exception {
+ // Setup - enable key management and table does not exist
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(false);
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute
+ service.init();
+
+ // Verify - table is created
+ verify(mockMaster).getTableDescriptors();
+ verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster).createSystemTable(any(TableDescriptor.class));
+ }
+
+ @Test
+ public void testInitWithTableDescriptorsIOException() throws Exception {
+ // Setup - enable key management but table descriptors throws IOException
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(any(TableName.class)))
+ .thenThrow(new IOException("Table descriptors error"));
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute & Verify - IOException should propagate
+ assertThrows(IOException.class, () -> service.init());
+
+ verify(mockMaster).getTableDescriptors();
+ verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster, never()).createSystemTable(any());
+ }
+
+ @Test
+ public void testInitWithCreateSystemTableIOException() throws Exception {
+ // Setup - enable key management, table doesn't exist, but createSystemTable throws IOException
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(false);
+ when(mockMaster.createSystemTable(any(TableDescriptor.class)))
+ .thenThrow(new IOException("Create table error"));
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute & Verify - IOException should propagate
+ assertThrows(IOException.class, () -> service.init());
+
+ verify(mockMaster).getTableDescriptors();
+ verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster).createSystemTable(any(TableDescriptor.class));
+ }
+
+ @Test
+ public void testConstructorWithMasterServices() throws Exception {
+ // Execute
+ service = new KeymetaMasterService(mockMaster);
+
+ // Verify - constructor should not throw an exception
+ // The service should be created successfully (no exceptions = success)
+ // We don't verify internal calls since the constructor just stores references
+ }
+
+ @Test
+ public void testMultipleInitCalls() throws Exception {
+ // Setup - enable key management and table exists
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(true);
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute - call init multiple times
+ service.init();
+ service.init();
+ service.init();
+
+ // Verify - each call should check table existence (idempotent behavior)
+ verify(mockMaster, times(3)).getTableDescriptors();
+ verify(mockTableDescriptors, times(3)).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster, never()).createSystemTable(any());
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
new file mode 100644
index 000000000000..3b3c4c23dc7d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
@@ -0,0 +1,437 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_CHECKSUM_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_METADATA_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_WRAPPED_BY_STK_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.KEY_META_INFO_FAMILY;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.KEY_STATE_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.REFRESHED_TIMESTAMP_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.STK_CHECKSUM_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.constructRowKeyForCustNamespace;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.constructRowKeyForMetadata;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.parseFromResult;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Suite;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+ TestKeymetaTableAccessor.TestAdd.class,
+ TestKeymetaTableAccessor.TestAddWithNullableFields.class,
+ TestKeymetaTableAccessor.TestGet.class,
+})
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaTableAccessor {
+ protected static final String ALIAS = "custId1";
+ protected static final byte[] CUST_ID = ALIAS.getBytes();
+ protected static final String KEY_NAMESPACE = "namespace";
+ protected static final String KEY_METADATA = "metadata1";
+
+ @Mock
+ protected Server server;
+ @Mock
+ protected Connection connection;
+ @Mock
+ protected Table table;
+ @Mock
+ protected ResultScanner scanner;
+ @Mock
+ protected SystemKeyCache systemKeyCache;
+
+ protected KeymetaTableAccessor accessor;
+ protected Configuration conf = HBaseConfiguration.create();
+ protected MockManagedKeyProvider managedKeyProvider;
+ protected ManagedKeyData latestSystemKey;
+
+ private AutoCloseable closeableMocks;
+
+ @Before
+ public void setUp() throws Exception {
+ closeableMocks = MockitoAnnotations.openMocks(this);
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+
+ when(server.getConnection()).thenReturn(connection);
+ when(connection.getTable(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(table);
+ when(server.getSystemKeyCache()).thenReturn(systemKeyCache);
+ when(server.getConfiguration()).thenReturn(conf);
+
+ accessor = new KeymetaTableAccessor(server);
+ managedKeyProvider = new MockManagedKeyProvider();
+ managedKeyProvider.initConfig(conf);
+
+ latestSystemKey = managedKeyProvider.getSystemKey("system-id".getBytes());
+ when(systemKeyCache.getLatestSystemKey()).thenReturn(latestSystemKey);
+ when(systemKeyCache.getSystemKeyByChecksum(anyLong())).thenReturn(latestSystemKey);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ closeableMocks.close();
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAdd extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAdd.class);
+
+ @Parameter(0)
+ public ManagedKeyState keyState;
+
+ @Parameterized.Parameters(name = "{index},keyState={0}")
+ public static Collection<Object[]> data() {
+ return Arrays.asList(
+ new Object[][] { { ACTIVE }, { FAILED }, { INACTIVE }, { DISABLED }, });
+ }
+
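+ // An ACTIVE key is written under two row keys (cust/namespace and metadata);
+ // every other state is written only under the metadata row key.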
+ @Test
+ public void testAddKey() throws Exception {
+ managedKeyProvider.setMockedKeyState(ALIAS, keyState);
+ ManagedKeyData keyData =
+ managedKeyProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+
+ accessor.addKey(keyData);
+
+ ArgumentCaptor<ArrayList<Put>> putCaptor = ArgumentCaptor.forClass(ArrayList.class);
+ verify(table).put(putCaptor.capture());
+ List<Put> puts = putCaptor.getValue();
+ assertEquals(keyState == ACTIVE ? 2 : 1, puts.size());
+ if (keyState == ACTIVE) {
+ assertPut(keyData, puts.get(0), constructRowKeyForCustNamespace(keyData));
+ assertPut(keyData, puts.get(1), constructRowKeyForMetadata(keyData));
+ } else {
+ assertPut(keyData, puts.get(0), constructRowKeyForMetadata(keyData));
+ }
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAddWithNullableFields extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAddWithNullableFields.class);
+
+ @Test
+ public void testAddKeyWithFailedStateAndNullMetadata() throws Exception {
+ managedKeyProvider.setMockedKeyState(ALIAS, FAILED);
+ ManagedKeyData keyData = new ManagedKeyData(CUST_ID, KEY_SPACE_GLOBAL, null, FAILED, null);
+
+ accessor.addKey(keyData);
+
+ ArgumentCaptor<ArrayList<Put>> putCaptor = ArgumentCaptor.forClass(ArrayList.class);
+ verify(table).put(putCaptor.capture());
+ List<Put> puts = putCaptor.getValue();
+ assertEquals(1, puts.size());
+ Put put = puts.get(0);
+
+ // Verify the row key uses state value for metadata hash
+ byte[] expectedRowKey = constructRowKeyForMetadata(CUST_ID, KEY_SPACE_GLOBAL,
+ new byte[] { FAILED.getVal() });
+ assertEquals(0, Bytes.compareTo(expectedRowKey, put.getRow()));
+
+ Map<Bytes, Bytes> valueMap = getValueMap(put);
+
+ // Verify key-related columns are not present
+ assertNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES)));
+ assertNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES)));
+ assertNull(valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES)));
+
+ // Verify state is set correctly
+ assertEquals(new Bytes(new byte[] { FAILED.getVal() }),
+ valueMap.get(new Bytes(KEY_STATE_QUAL_BYTES)));
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestGet extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestGet.class);
+
+ @Mock
+ private Result result1;
+ @Mock
+ private Result result2;
+
+ private String keyMetadata2 = "metadata2";
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+
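+ // result1 models an ACTIVE key row and result2 a FAILED key row, as stubbed below.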
+ when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES)))
+ .thenReturn(new byte[] { ACTIVE.getVal() });
+ when(result2.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES)))
+ .thenReturn(new byte[] { FAILED.getVal() });
+ for (Result result : Arrays.asList(result1, result2)) {
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(REFRESHED_TIMESTAMP_QUAL_BYTES)))
+ .thenReturn(Bytes.toBytes(0L));
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(STK_CHECKSUM_QUAL_BYTES)))
+ .thenReturn(Bytes.toBytes(0L));
+ }
+ when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES)))
+ .thenReturn(KEY_METADATA.getBytes());
+ when(result2.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES)))
+ .thenReturn(keyMetadata2.getBytes());
+ }
+
+ @Test
+ public void testParseEmptyResult() throws Exception {
+ Result result = mock(Result.class);
+ when(result.isEmpty()).thenReturn(true);
+
+ assertNull(parseFromResult(server, CUST_ID, KEY_NAMESPACE, null));
+ assertNull(parseFromResult(server, CUST_ID, KEY_NAMESPACE, result));
+ }
+
+ @Test
+ public void testGetActiveKeyMissingWrappedKey() throws Exception {
+ Result result = mock(Result.class);
+ when(table.get(any(Get.class))).thenReturn(result);
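+ // Consecutive stubbing: the first lookup sees an ACTIVE state, the second an
+ // INACTIVE state, and neither row carries a wrapped key.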
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES)))
+ .thenReturn(new byte[] { ACTIVE.getVal() }, new byte[] { INACTIVE.getVal() });
+
+ IOException ex;
+ ex = assertThrows(IOException.class,
+ () -> accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, KEY_METADATA));
+ assertEquals("ACTIVE key must have a wrapped key", ex.getMessage());
+ ex = assertThrows(IOException.class, () ->
+ accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, KEY_METADATA));
+ assertEquals("INACTIVE key must have a wrapped key", ex.getMessage());
+ }
+
+ @Test
+ public void testGetKeyMissingSTK() throws Exception {
+ when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_WRAPPED_BY_STK_QUAL_BYTES)))
+ .thenReturn(new byte[] { 0 });
+ when(systemKeyCache.getSystemKeyByChecksum(anyLong())).thenReturn(null);
+ when(table.get(any(Get.class))).thenReturn(result1);
+
+ ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA);
+
+ assertNull(result);
+ }
+
+ @Test
+ public void testGetKeyWithWrappedKey() throws Exception {
+ ManagedKeyData keyData = setupActiveKey(CUST_ID, result1);
+
+ ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA);
+
+ verify(table).get(any(Get.class));
+ assertNotNull(result);
+ assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian()));
+ assertEquals(KEY_NAMESPACE, result.getKeyNamespace());
+ assertEquals(keyData.getKeyMetadata(), result.getKeyMetadata());
+ assertEquals(0, Bytes.compareTo(keyData.getTheKey().getEncoded(),
+ result.getTheKey().getEncoded()));
+ assertEquals(ACTIVE, result.getKeyState());
+
+ // When DEK checksum doesn't match, we expect a null value.
+ result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA);
+ assertNull(result);
+ }
+
+ @Test
+ public void testGetKeyWithFailedState() throws Exception {
+ // Test with FAILED state and null metadata
+ Result failedResult = mock(Result.class);
+ when(failedResult.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES)))
+ .thenReturn(new byte[] { FAILED.getVal() });
+ when(failedResult.getValue(eq(KEY_META_INFO_FAMILY), eq(REFRESHED_TIMESTAMP_QUAL_BYTES)))
+ .thenReturn(Bytes.toBytes(0L));
+ when(failedResult.getValue(eq(KEY_META_INFO_FAMILY), eq(STK_CHECKSUM_QUAL_BYTES)))
+ .thenReturn(Bytes.toBytes(0L));
+ // Explicitly return null for metadata to simulate FAILED state with null metadata
+ when(failedResult.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES)))
+ .thenReturn(null);
+
+ when(table.get(any(Get.class))).thenReturn(failedResult);
+ ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, FAILED);
+
+ verify(table).get(any(Get.class));
+ assertNotNull(result);
+ assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian()));
+ assertEquals(KEY_NAMESPACE, result.getKeyNamespace());
+ assertNull(result.getKeyMetadata());
+ assertNull(result.getTheKey());
+ assertEquals(FAILED, result.getKeyState());
+ }
+
+ @Test
+ public void testGetKeyWithoutWrappedKey() throws Exception {
+ when(table.get(any(Get.class))).thenReturn(result2);
+
+ ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA);
+
+ verify(table).get(any(Get.class));
+ assertNotNull(result);
+ assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian()));
+ assertEquals(KEY_NAMESPACE, result.getKeyNamespace());
+ assertEquals(keyMetadata2, result.getKeyMetadata());
+ assertNull(result.getTheKey());
+ assertEquals(FAILED, result.getKeyState());
+ }
+
+ @Test
+ public void testGetAllKeys() throws Exception {
+ ManagedKeyData keyData = setupActiveKey(CUST_ID, result1);
+
+ when(scanner.iterator()).thenReturn(List.of(result1, result2).iterator());
+ when(table.getScanner(any(Scan.class))).thenReturn(scanner);
+
+ List<ManagedKeyData> allKeys = accessor.getAllKeys(CUST_ID, KEY_NAMESPACE);
+
+ assertEquals(2, allKeys.size());
+ assertEquals(keyData.getKeyMetadata(), allKeys.get(0).getKeyMetadata());
+ assertEquals(keyMetadata2, allKeys.get(1).getKeyMetadata());
+ verify(table).getScanner(any(Scan.class));
+ }
+
+ @Test
+ public void testGetActiveKey() throws Exception {
+ ManagedKeyData keyData = setupActiveKey(CUST_ID, result1);
+
+ when(scanner.iterator()).thenReturn(List.of(result1).iterator());
+ when(table.get(any(Get.class))).thenReturn(result1);
+
+ ManagedKeyData activeKey = accessor.getActiveKey(CUST_ID, KEY_NAMESPACE);
+
+ assertNotNull(activeKey);
+ assertEquals(keyData, activeKey);
+ verify(table).get(any(Get.class));
+ }
+
+ private ManagedKeyData setupActiveKey(byte[] custId, Result result) throws Exception {
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(custId, KEY_NAMESPACE);
+ byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(conf, null,
+ keyData.getTheKey(), latestSystemKey.getTheKey());
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_WRAPPED_BY_STK_QUAL_BYTES)))
+ .thenReturn(dekWrappedBySTK);
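+ // Consecutive stubbing: the first read returns the matching DEK checksum, the
+ // second returns 0L to simulate a checksum mismatch.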
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_CHECKSUM_QUAL_BYTES)))
+ .thenReturn(Bytes.toBytes(keyData.getKeyChecksum()), Bytes.toBytes(0L));
+ // Update the mock to return the correct metadata from the keyData
+ when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES)))
+ .thenReturn(keyData.getKeyMetadata().getBytes());
+ when(table.get(any(Get.class))).thenReturn(result);
+ return keyData;
+ }
+ }
+
+ protected void assertPut(ManagedKeyData keyData, Put put, byte[] rowKey) {
+ assertEquals(Durability.SKIP_WAL, put.getDurability());
+ assertEquals(HConstants.SYSTEMTABLE_QOS, put.getPriority());
+ assertEquals(0, Bytes.compareTo(rowKey, put.getRow()));
+
+ Map<Bytes, Bytes> valueMap = getValueMap(put);
+
+ if (keyData.getTheKey() != null) {
+ assertNotNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES)));
+ assertNotNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES)));
+ assertEquals(new Bytes(Bytes.toBytes(latestSystemKey.getKeyChecksum())),
+ valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES)));
+ } else {
+ assertNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES)));
+ assertNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES)));
+ assertNull(valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES)));
+ }
+ assertEquals(new Bytes(keyData.getKeyMetadata().getBytes()),
+ valueMap.get(new Bytes(DEK_METADATA_QUAL_BYTES)));
+ assertNotNull(valueMap.get(new Bytes(REFRESHED_TIMESTAMP_QUAL_BYTES)));
+ assertEquals(new Bytes(new byte[] { keyData.getKeyState().getVal() }),
+ valueMap.get(new Bytes(KEY_STATE_QUAL_BYTES)));
+ }
+
+ private static Map<Bytes, Bytes> getValueMap(Put put) {
+ NavigableMap<byte[], List<Cell>> familyCellMap = put.getFamilyCellMap();
+ List<Cell> cells = familyCellMap.get(KEY_META_INFO_FAMILY);
+ Map<Bytes, Bytes> valueMap = new HashMap<>();
+ for (Cell cell : cells) {
+ valueMap.put(
+ new Bytes(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()),
+ new Bytes(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+ }
+ return valueMap;
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
new file mode 100644
index 000000000000..c44e7d45061b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
@@ -0,0 +1,601 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.clearInvocations;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+import net.bytebuddy.implementation.bind.annotation.AllArguments;
+import net.bytebuddy.implementation.bind.annotation.Origin;
+import net.bytebuddy.implementation.bind.annotation.RuntimeType;
+import net.bytebuddy.matcher.ElementMatchers;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Suite;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+ TestManagedKeyDataCache.TestGeneric.class,
+ TestManagedKeyDataCache.TestWithoutL2Cache.class,
+ TestManagedKeyDataCache.TestWithL2CacheAndNoDynamicLookup.class,
+ TestManagedKeyDataCache.TestWithL2CacheAndDynamicLookup.class,
+})
+@Category({ MasterTests.class, SmallTests.class })
+public class TestManagedKeyDataCache {
+ private static final String ALIAS = "cust1";
+ private static final byte[] CUST_ID = ALIAS.getBytes();
+ private static Class<? extends MockManagedKeyProvider> providerClass;
+
+ @Mock
+ private Server server;
+ @Spy
+ protected MockManagedKeyProvider testProvider;
+ protected ManagedKeyDataCache cache;
+ protected Configuration conf = HBaseConfiguration.create();
+
+ public static class ForwardingInterceptor {
+ static ThreadLocal<MockManagedKeyProvider> delegate = new ThreadLocal<>();
+
+ static void setDelegate(MockManagedKeyProvider d) {
+ delegate.set(d);
+ }
+
+ @RuntimeType
+ public Object intercept(@Origin Method method, @AllArguments Object[] args) throws Throwable {
+ // Translate the InvocationTargetException that results when the provider throws an exception.
+ // This is actually not needed if the intercept is delegated directly to the spy.
+ try {
+ return method.invoke(delegate.get(), args); // calls the spy, triggering Mockito
+ } catch (InvocationTargetException e) {
+ throw e.getCause();
+ }
+ }
+ }
+
+ @BeforeClass
+ public static synchronized void setUpInterceptor() {
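+ // The generated subclass can only be injected into the class loader once, so
+ // build it a single time and let every suite member reuse it.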
+ if (providerClass != null) {
+ return;
+ }
+ providerClass = new ByteBuddy()
+ .subclass(MockManagedKeyProvider.class)
+ .name("org.apache.hadoop.hbase.io.crypto.MockManagedKeyProviderSpy")
+ .method(ElementMatchers.any()) // Intercept all methods
+ // Using a delegator instead of directly forwarding to testProvider to
+ // facilitate switching the testProvider instance between tests. Besides, it
+ // allows the generated subclass to be built once and reused across the suite.
+ .intercept(MethodDelegation.to(new ForwardingInterceptor()))
+ .make()
+ .load(MockManagedKeyProvider.class.getClassLoader(), ClassLoadingStrategy.Default.INJECTION)
+ .getLoaded();
+ }
+
+ @Before
+ public void setUp() {
+ MockitoAnnotations.openMocks(this);
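+ // Point the shared interceptor at this test's fresh spy so that intercepted
+ // calls are recorded by Mockito.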
+ ForwardingInterceptor.setDelegate(testProvider);
+
+ Encryption.clearKeyProviderCache();
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, providerClass.getName());
+
+ // Configure the server mock to return the configuration
+ when(server.getConfiguration()).thenReturn(conf);
+
+ testProvider.setMultikeyGenMode(true);
+ }
+
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestGeneric {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestGeneric.class);
+
+ @Test
+ public void testEmptyCache() throws Exception {
+ ManagedKeyDataCache cache = new ManagedKeyDataCache(HBaseConfiguration.create(), null);
+ assertEquals(0, cache.getGenericCacheEntryCount());
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testActiveKeysCacheKeyEqualsAndHashCode() {
+ byte[] custodian1 = new byte[] {1, 2, 3};
+ byte[] custodian2 = new byte[] {1, 2, 3};
+ byte[] custodian3 = new byte[] {4, 5, 6};
+ String namespace1 = "ns1";
+ String namespace2 = "ns2";
+
+ // Reflexive
+ ManagedKeyDataCache.ActiveKeysCacheKey key1 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace1);
+ assertTrue(key1.equals(key1));
+
+ // Symmetric and consistent for equal content
+ ManagedKeyDataCache.ActiveKeysCacheKey key2 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian2, namespace1);
+ assertTrue(key1.equals(key2));
+ assertTrue(key2.equals(key1));
+ assertEquals(key1.hashCode(), key2.hashCode());
+
+ // Different custodian
+ ManagedKeyDataCache.ActiveKeysCacheKey key3 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace1);
+ assertFalse(key1.equals(key3));
+ assertFalse(key3.equals(key1));
+
+ // Different namespace
+ ManagedKeyDataCache.ActiveKeysCacheKey key4 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace2);
+ assertFalse(key1.equals(key4));
+ assertFalse(key4.equals(key1));
+
+ // Null and different class
+ assertFalse(key1.equals(null));
+ assertFalse(key1.equals("not a key"));
+
+ // Both fields different
+ ManagedKeyDataCache.ActiveKeysCacheKey key5 =
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace2);
+ assertFalse(key1.equals(key5));
+ assertFalse(key5.equals(key1));
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestWithoutL2Cache extends TestManagedKeyDataCache {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestWithoutL2Cache.class);
+
+ @Before
+ public void setUp() {
+ super.setUp();
+ cache = new ManagedKeyDataCache(conf, null);
+ }
+
+ @Test
+ public void testGenericCacheForNonExistentKey() throws Exception {
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(testProvider).unwrapKey(any(String.class), any());
+ }
+
+ @Test
+ public void testWithInvalidProvider() throws Exception {
+ ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ doThrow(new IOException("Test exception")).when(testProvider).unwrapKey(any(String.class),
+ any());
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null));
+ verify(testProvider).unwrapKey(any(String.class), any());
+ // A second call to getEntry should not result in a call to the provider due to the
+ // negative cache entry.
+ clearInvocations(testProvider);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null));
+ verify(testProvider, never()).unwrapKey(any(String.class), any());
+ doThrow(new IOException("Test exception")).when(testProvider).getManagedKey(any(),
+ any(String.class));
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ // A second call to getActiveEntry should not result in a call to the provider due to the
+ // negative cache entry.
+ clearInvocations(testProvider);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testGenericCache() throws Exception {
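+ // With multikeyGenMode enabled in setUp(), each getManagedKey() call below
+ // produces a distinct key version for the same custodian and namespace.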
+ ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(globalKey1, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL,
+ globalKey1.getKeyMetadata(), null));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ ManagedKeyData globalKey2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(globalKey2, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL,
+ globalKey2.getKeyMetadata(), null));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ ManagedKeyData globalKey3 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(globalKey3, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL,
+ globalKey3.getKeyMetadata(), null));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCache() throws Exception {
+ assertNotNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ ManagedKeyData activeKey = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(activeKey);
+ assertEquals(activeKey, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testGenericCacheOperations() throws Exception {
+ ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ ManagedKeyData nsKey1 = testProvider.getManagedKey(CUST_ID, "namespace1");
+ assertGenericCacheEntries(nsKey1, globalKey1);
+ ManagedKeyData globalKey2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertGenericCacheEntries(globalKey2, nsKey1, globalKey1);
+ ManagedKeyData nsKey2 = testProvider.getManagedKey(CUST_ID,
+ "namespace1");
+ assertGenericCacheEntries(nsKey2, globalKey2, nsKey1, globalKey1);
+ }
+
+ @Test
+ public void testActiveKeyGetNoActive() throws Exception {
+ testProvider.setMockedKeyState(ALIAS, FAILED);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheOperations() throws Exception {
+ assertNotNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertNotNull(cache.getActiveEntry(CUST_ID, "namespace1"));
+ assertEquals(2, cache.getActiveCacheEntryCount());
+
+ cache.invalidateAll();
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ assertNotNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertEquals(1, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testGenericCacheUsingActiveKeysCacheOverProvider() throws Exception {
+ ManagedKeyData key = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key);
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(testProvider, never()).unwrapKey(any(String.class), any());
+ }
+
+ @Test
+ public void testThatActiveKeysCache_SkipsProvider_WhenLoadedViaGenericCache() throws Exception {
+ ManagedKeyData key1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(key1, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null));
+ ManagedKeyData key2 = testProvider.getManagedKey(CUST_ID, "namespace1");
+ assertEquals(key2, cache.getEntry(CUST_ID, "namespace1", key2.getKeyMetadata(), null));
+ verify(testProvider, times(2)).getManagedKey(any(), any(String.class));
+ assertEquals(2, cache.getActiveCacheEntryCount());
+ clearInvocations(testProvider);
+ assertEquals(key1, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ assertEquals(key2, cache.getActiveEntry(CUST_ID, "namespace1"));
+ // ACTIVE keys are automatically added to activeKeysCache when loaded
+ // via getEntry, so getActiveEntry will find them there and won't call the provider
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ cache.invalidateAll();
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testThatNonActiveKey_IsIgnored_WhenLoadedViaGenericCache() throws Exception {
+ testProvider.setMockedKeyState(ALIAS, FAILED);
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ assertEquals(0, cache.getActiveCacheEntryCount());
+
+ testProvider.setMockedKeyState(ALIAS, DISABLED);
+ key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ assertEquals(0, cache.getActiveCacheEntryCount());
+
+ testProvider.setMockedKeyState(ALIAS, INACTIVE);
+ key = testProvider.getManagedKey(CUST_ID, "namespace1");
+ assertEquals(key, cache.getEntry(CUST_ID, "namespace1", key.getKeyMetadata(), null));
+ assertEquals(0, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testActiveKeysCacheWithMultipleCustodiansInGenericCache() throws Exception {
+ ManagedKeyData key1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null));
+ String alias2 = "cust2";
+ byte[] cust_id2 = alias2.getBytes();
+ ManagedKeyData key2 = testProvider.getManagedKey(cust_id2, KEY_SPACE_GLOBAL);
+ assertNotNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key2.getKeyMetadata(), null));
+ assertNotNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ // ACTIVE keys are automatically added to activeKeysCache when loaded.
+ assertEquals(1, cache.getActiveCacheEntryCount());
+ }
+
+ @Test
+ public void testActiveKeysCacheWithMultipleNamespaces() throws Exception {
+ ManagedKeyData key1 = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key1);
+ assertEquals(key1, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ ManagedKeyData key2 = cache.getActiveEntry(CUST_ID, "namespace1");
+ assertNotNull(key2);
+ assertEquals(key2, cache.getActiveEntry(CUST_ID, "namespace1"));
+ ManagedKeyData key3 = cache.getActiveEntry(CUST_ID, "namespace2");
+ assertNotNull(key3);
+ assertEquals(key3, cache.getActiveEntry(CUST_ID, "namespace2"));
+ verify(testProvider, times(3)).getManagedKey(any(), any(String.class));
+ assertEquals(3, cache.getActiveCacheEntryCount());
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestWithL2CacheAndNoDynamicLookup extends TestManagedKeyDataCache {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestWithL2CacheAndNoDynamicLookup.class);
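+ // The mocked KeymetaTableAccessor stands in for the table-backed L2 cache
+ // behind the in-memory cache.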
+ private KeymetaTableAccessor mockL2 = mock(KeymetaTableAccessor.class);
+
+ @Before
+ public void setUp() {
+ super.setUp();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, false);
+ cache = new ManagedKeyDataCache(conf, mockL2);
+ }
+
+ @Test
+ public void testGenericCacheNonExistentKeyInL2Cache() throws Exception {
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2).getKey(any(), any(String.class), any(String.class));
+ clearInvocations(mockL2);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2, never()).getKey(any(), any(String.class), any(String.class));
+ }
+
+ @Test
+ public void testGenericCacheRetrievalFromL2Cache() throws Exception {
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata()))
+ .thenReturn(key);
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(mockL2).getKey(any(), any(String.class), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheNonExistentKeyInL2Cache() throws Exception {
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getActiveKey(any(), any(String.class));
+ clearInvocations(mockL2);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2, never()).getActiveKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheRetrievalFromL2Cache() throws Exception {
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL))
+ .thenReturn(key);
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getActiveKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testGenericCacheWithKeymetaAccessorException() throws Exception {
+ when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata"))
+ .thenThrow(new IOException("Test exception"));
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2).getKey(any(), any(String.class), any(String.class));
+ clearInvocations(mockL2);
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2, never()).getKey(any(), any(String.class), any(String.class));
+ }
+
+ @Test
+ public void testGetActiveEntryWithKeymetaAccessorException() throws Exception {
+ when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL))
+ .thenThrow(new IOException("Test exception"));
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getActiveKey(any(), any(String.class));
+ clearInvocations(mockL2);
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2, never()).getActiveKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheUsesKeymetaAccessorWhenGenericCacheEmpty() throws Exception {
+ // Ensure generic cache is empty
+ cache.invalidateAll();
+
+ // Mock the keymetaAccessor to return a key
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL))
+ .thenReturn(key);
+
+ // Get the active entry - it should call keymetaAccessor since generic cache is empty
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getActiveKey(any(), any(String.class));
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestWithL2CacheAndDynamicLookup extends TestManagedKeyDataCache {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestWithL2CacheAndDynamicLookup.class);
+ private KeymetaTableAccessor mockL2 = mock(KeymetaTableAccessor.class);
+
+ @Before
+ public void setUp() {
+ super.setUp();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
+ cache = new ManagedKeyDataCache(conf, mockL2);
+ }
+
+ @Test
+ public void testGenericCacheRetrievalFromProviderWhenKeyNotFoundInL2Cache() throws Exception {
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ doReturn(key).when(testProvider).unwrapKey(any(String.class), any());
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(mockL2).getKey(any(), any(String.class), any(String.class));
+ verify(mockL2).addKey(any(ManagedKeyData.class));
+ }
+
+ @Test
+ public void testAddKeyFailure() throws Exception {
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ doReturn(key).when(testProvider).unwrapKey(any(String.class), any());
+ doThrow(new IOException("Test exception")).when(mockL2).addKey(any(ManagedKeyData.class));
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(mockL2).addKey(any(ManagedKeyData.class));
+ }
+
+ @Test
+ public void testGenericCacheDynamicLookupUnexpectedException() throws Exception {
+    doThrow(new RuntimeException("Test exception")).when(testProvider)
+      .unwrapKey(any(String.class), any());
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
+ verify(mockL2).getKey(any(), any(String.class), any(String.class));
+ verify(mockL2, never()).addKey(any(ManagedKeyData.class));
+ }
+
+ @Test
+ public void testActiveKeysCacheDynamicLookupWithUnexpectedException() throws Exception {
+ doThrow(new RuntimeException("Test exception")).when(testProvider).getManagedKey(any(),
+ any(String.class));
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+ // A 2nd invocation should not result in a call to the provider.
+ assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+  public void testActiveKeysCacheRetrievalFromProviderWhenKeyNotFoundInL2Cache() throws Exception {
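+    // When L2 has no active key, the cache falls back to the provider's getManagedKey().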
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ doReturn(key).when(testProvider).getManagedKey(any(), any(String.class));
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(mockL2).getActiveKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testGenericCacheUsesActiveKeysCacheFirst() throws Exception {
+ // First populate the active keys cache with an active key
+ ManagedKeyData key1 = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+
+ // Now get the generic cache entry - it should use the active keys cache first, not call
+ // keymetaAccessor
+ assertEquals(key1, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+
+    // Look up a different key.
+ ManagedKeyData key2 = cache.getActiveEntry(CUST_ID, "namespace1");
+ assertNotEquals(key1, key2);
+ verify(testProvider).getManagedKey(any(), any(String.class));
+ clearInvocations(testProvider);
+
+ // Now get the generic cache entry - it should use the active keys cache first, not call
+ // keymetaAccessor
+ assertEquals(key2, cache.getEntry(CUST_ID, "namespace1", key2.getKeyMetadata(), null));
+ verify(testProvider, never()).getManagedKey(any(), any(String.class));
+ }
+
+ @Test
+ public void testGetOlderEntryFromGenericCache() throws Exception {
+ // Get one version of the key in to ActiveKeysCache
+ ManagedKeyData key1 = cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL);
+ assertNotNull(key1);
+ clearInvocations(testProvider);
+
+ // Now try to lookup another version of the key, it should lookup and discard the active key.
+ ManagedKeyData key2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(key2, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key2.getKeyMetadata(), null));
+ verify(testProvider).unwrapKey(any(String.class), any());
+ }
+
+ @Test
+  public void testActiveKeysCachePopulatedByGenericCache() throws Exception {
+ // First populate the generic cache with an active key
+ ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
+ verify(testProvider).unwrapKey(any(String.class), any());
+
+ // Clear invocations to reset the mock state
+ clearInvocations(testProvider);
+
+ // Now get the active entry - it should already be there due to the generic cache first
+ assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
+ verify(testProvider, never()).unwrapKey(any(String.class), any());
+ }
+ }
+
+ protected void assertGenericCacheEntries(ManagedKeyData... keys) throws Exception {
+    for (ManagedKeyData key : keys) {
+ assertEquals(key, cache.getEntry(key.getKeyCustodian(), key.getKeyNamespace(),
+ key.getKeyMetadata(), null));
+ }
+ assertEquals(keys.length, cache.getGenericCacheEntryCount());
+ int activeKeysCount = Arrays.stream(keys)
+ .filter(key -> key.getKeyState() == ManagedKeyState.ACTIVE)
+ .map(key -> new ManagedKeyDataCache.ActiveKeysCacheKey(key.getKeyCustodian(),
+ key.getKeyNamespace()))
+ .collect(Collectors.toSet())
+ .size();
+ assertEquals(activeKeysCount, cache.getActiveCacheEntryCount());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
new file mode 100644
index 000000000000..1ffed4707475
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.security.KeyException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestManagedKeymeta extends ManagedKeyTestBase {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagedKeymeta.class);
+
+ @Test
+ public void testEnableLocal() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin();
+ doTestEnable(keymetaAdmin);
+ }
+
+ @Test
+ public void testEnableOverRPC() throws Exception {
+ KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ doTestEnable(adminClient);
+ }
+
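+  // Shared body for the local and RPC variants above: enables key management for a fresh
+  // custodian, verifies the stored key, then checks the FAILED and DISABLED custodian states.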
+ private void doTestEnable(KeymetaAdmin adminClient) throws IOException, KeyException {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ MockManagedKeyProvider managedKeyProvider = (MockManagedKeyProvider)
+ Encryption.getKeyProvider(master.getConfiguration());
+ String cust = "cust1";
+ String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes());
+    List<ManagedKeyData> managedKeyStates =
+ adminClient.enableKeyManagement(encodedCust, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyDataListSingleKey(managedKeyStates, ManagedKeyState.ACTIVE);
+
+    List<ManagedKeyData> managedKeys =
+ adminClient.getManagedKeys(encodedCust, ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertEquals(1, managedKeys.size());
+ assertEquals(managedKeyProvider.getLastGeneratedKeyData(cust,
+ ManagedKeyData.KEY_SPACE_GLOBAL).cloneWithoutKey(), managedKeys.get(0).cloneWithoutKey());
+
+ String nonExistentCust = "nonExistentCust";
+ managedKeyProvider.setMockedKeyState(nonExistentCust, ManagedKeyState.FAILED);
+    List<ManagedKeyData> keyDataList1 =
+ adminClient.enableKeyManagement(ManagedKeyProvider.encodeToStr(nonExistentCust.getBytes()),
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyDataListSingleKey(keyDataList1, ManagedKeyState.FAILED);
+
+ String disabledCust = "disabledCust";
+ managedKeyProvider.setMockedKeyState(disabledCust, ManagedKeyState.DISABLED);
+    List<ManagedKeyData> keyDataList2 =
+ adminClient.enableKeyManagement(ManagedKeyProvider.encodeToStr(disabledCust.getBytes()),
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyDataListSingleKey(keyDataList2, ManagedKeyState.DISABLED);
+ }
+
+  private static void assertKeyDataListSingleKey(List<ManagedKeyData> managedKeyStates,
+ ManagedKeyState keyState) {
+ assertNotNull(managedKeyStates);
+ assertEquals(1, managedKeyStates.size());
+ assertEquals(keyState, managedKeyStates.get(0).getKeyState());
+ }
+
+ @Test
+ public void testEnableKeyManagementWithServiceException() throws Exception {
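+    // The client is expected to unwrap the ServiceException thrown by the stub and rethrow
+    // the underlying IOException.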
+ ManagedKeysProtos.ManagedKeysService.BlockingInterface mockStub =
+ mock(ManagedKeysProtos.ManagedKeysService.BlockingInterface.class);
+
+ ServiceException networkError = new ServiceException("Network error");
+ networkError.initCause(new IOException("Network error"));
+ when(mockStub.enableKeyManagement(any(), any())).thenThrow(networkError);
+
+ KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ // Use reflection to set the stub
+ Field stubField = KeymetaAdminClient.class.getDeclaredField("stub");
+ stubField.setAccessible(true);
+ stubField.set(client, mockStub);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ client.enableKeyManagement("cust", "namespace");
+ });
+
+ assertTrue(exception.getMessage().contains("Network error"));
+ }
+
+ @Test
+ public void testGetManagedKeysWithServiceException() throws Exception {
+ // Similar test for getManagedKeys method
+ ManagedKeysProtos.ManagedKeysService.BlockingInterface mockStub =
+ mock(ManagedKeysProtos.ManagedKeysService.BlockingInterface.class);
+
+ ServiceException networkError = new ServiceException("Network error");
+ networkError.initCause(new IOException("Network error"));
+ when(mockStub.getManagedKeys(any(), any())).thenThrow(networkError);
+
+ KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection());
+ Field stubField = KeymetaAdminClient.class.getDeclaredField("stub");
+ stubField.setAccessible(true);
+ stubField.set(client, mockStub);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ client.getManagedKeys("cust", "namespace");
+ });
+
+ assertTrue(exception.getMessage().contains("Network error"));
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
new file mode 100644
index 000000000000..a92818f8aada
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
@@ -0,0 +1,310 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Key;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+/**
+ * Tests for SystemKeyCache class
+ */
+@Category({ MasterTests.class, SmallTests.class })
+public class TestSystemKeyCache {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSystemKeyCache.class);
+
+ @Mock
+ private SystemKeyAccessor mockAccessor;
+
+ private static final byte[] TEST_CUSTODIAN = "test-custodian".getBytes();
+ private static final String TEST_NAMESPACE = "test-namespace";
+ private static final String TEST_METADATA_1 = "metadata-1";
+ private static final String TEST_METADATA_2 = "metadata-2";
+ private static final String TEST_METADATA_3 = "metadata-3";
+
+ private Key testKey1;
+ private Key testKey2;
+ private Key testKey3;
+ private ManagedKeyData keyData1;
+ private ManagedKeyData keyData2;
+ private ManagedKeyData keyData3;
+ private Path keyPath1;
+ private Path keyPath2;
+ private Path keyPath3;
+
+ @Before
+ public void setUp() {
+ MockitoAnnotations.openMocks(this);
+
+ // Create test keys
+ testKey1 = new SecretKeySpec("test-key-1-bytes".getBytes(), "AES");
+ testKey2 = new SecretKeySpec("test-key-2-bytes".getBytes(), "AES");
+ testKey3 = new SecretKeySpec("test-key-3-bytes".getBytes(), "AES");
+
+ // Create test key data with different checksums
+ keyData1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey1,
+ ManagedKeyState.ACTIVE, TEST_METADATA_1, 1000L);
+ keyData2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey2,
+ ManagedKeyState.ACTIVE, TEST_METADATA_2, 2000L);
+ keyData3 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey3,
+ ManagedKeyState.ACTIVE, TEST_METADATA_3, 3000L);
+
+ // Create test paths
+ keyPath1 = new Path("/system/keys/key1");
+ keyPath2 = new Path("/system/keys/key2");
+ keyPath3 = new Path("/system/keys/key3");
+ }
+
+ @Test
+ public void testCreateCacheWithSingleSystemKey() throws Exception {
+ // Setup
+    List<Path> keyPaths = Collections.singletonList(keyPath1);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNotNull(cache);
+ assertSame(keyData1, cache.getLatestSystemKey());
+ assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum()));
+ assertNull(cache.getSystemKeyByChecksum(999L)); // Non-existent checksum
+
+ verify(mockAccessor).getAllSystemKeyFiles();
+ verify(mockAccessor).loadSystemKey(keyPath1);
+ }
+
+ @Test
+ public void testCreateCacheWithMultipleSystemKeys() throws Exception {
+ // Setup - keys should be processed in order, first one becomes latest
+    List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2, keyPath3);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+ when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2);
+ when(mockAccessor.loadSystemKey(keyPath3)).thenReturn(keyData3);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNotNull(cache);
+ assertSame(keyData1, cache.getLatestSystemKey()); // First key becomes latest
+
+ // All keys should be accessible by checksum
+ assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum()));
+ assertSame(keyData2, cache.getSystemKeyByChecksum(keyData2.getKeyChecksum()));
+ assertSame(keyData3, cache.getSystemKeyByChecksum(keyData3.getKeyChecksum()));
+
+ // Non-existent checksum should return null
+ assertNull(cache.getSystemKeyByChecksum(999L));
+
+ verify(mockAccessor).getAllSystemKeyFiles();
+ verify(mockAccessor).loadSystemKey(keyPath1);
+ verify(mockAccessor).loadSystemKey(keyPath2);
+ verify(mockAccessor).loadSystemKey(keyPath3);
+ }
+
+ @Test
+ public void testCreateCacheWithNoSystemKeyFiles() throws Exception {
+    // Setup - no system key files exist, so no cache can be created
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(Collections.emptyList());
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNull(cache);
+ verify(mockAccessor).getAllSystemKeyFiles();
+ }
+
+ @Test
+ public void testCreateCacheWithEmptyKeyFilesList() throws Exception {
+ // Setup - alternative empty scenario
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(new ArrayList<>());
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNull(cache);
+ verify(mockAccessor).getAllSystemKeyFiles();
+ }
+
+ @Test
+ public void testGetLatestSystemKeyConsistency() throws Exception {
+ // Setup
+    List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+ when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify - latest key should be consistent across calls
+ ManagedKeyData latest1 = cache.getLatestSystemKey();
+ ManagedKeyData latest2 = cache.getLatestSystemKey();
+
+ assertNotNull(latest1);
+ assertSame(latest1, latest2);
+ assertSame(keyData1, latest1); // First key should be latest
+ }
+
+ @Test
+ public void testGetSystemKeyByChecksumWithDifferentKeys() throws Exception {
+ // Setup
+    List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2, keyPath3);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+ when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2);
+ when(mockAccessor.loadSystemKey(keyPath3)).thenReturn(keyData3);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify each key can be retrieved by its unique checksum
+ long checksum1 = keyData1.getKeyChecksum();
+ long checksum2 = keyData2.getKeyChecksum();
+ long checksum3 = keyData3.getKeyChecksum();
+
+ // Checksums should be different
+    assertTrue(checksum1 != checksum2);
+    assertTrue(checksum2 != checksum3);
+    assertTrue(checksum1 != checksum3);
+
+ // Each key should be retrievable by its checksum
+ assertSame(keyData1, cache.getSystemKeyByChecksum(checksum1));
+ assertSame(keyData2, cache.getSystemKeyByChecksum(checksum2));
+ assertSame(keyData3, cache.getSystemKeyByChecksum(checksum3));
+ }
+
+ @Test
+ public void testGetSystemKeyByChecksumWithNonExistentChecksum() throws Exception {
+ // Setup
+    List<Path> keyPaths = Collections.singletonList(keyPath1);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify
+ assertNotNull(cache);
+
+ // Test various non-existent checksums
+ assertNull(cache.getSystemKeyByChecksum(0L));
+ assertNull(cache.getSystemKeyByChecksum(-1L));
+ assertNull(cache.getSystemKeyByChecksum(Long.MAX_VALUE));
+ assertNull(cache.getSystemKeyByChecksum(Long.MIN_VALUE));
+
+ // But the actual checksum should work
+ assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum()));
+ }
+
+ @Test(expected = IOException.class)
+ public void testCreateCacheWithAccessorIOException() throws Exception {
+ // Setup - accessor throws IOException
+ when(mockAccessor.getAllSystemKeyFiles()).thenThrow(new IOException("File system error"));
+
+ // Execute - should propagate the IOException
+ SystemKeyCache.createCache(mockAccessor);
+ }
+
+ @Test(expected = IOException.class)
+ public void testCreateCacheWithLoadSystemKeyIOException() throws Exception {
+ // Setup - loading key throws IOException
+    List<Path> keyPaths = Collections.singletonList(keyPath1);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenThrow(new IOException("Key load error"));
+
+ // Execute - should propagate the IOException
+ SystemKeyCache.createCache(mockAccessor);
+ }
+
+ @Test
+ public void testCacheWithKeysHavingSameChecksum() throws Exception {
+ // Setup - create two keys that will have the same checksum (same content)
+ Key sameKey1 = new SecretKeySpec("identical-bytes".getBytes(), "AES");
+ Key sameKey2 = new SecretKeySpec("identical-bytes".getBytes(), "AES");
+
+ ManagedKeyData sameManagedKey1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE,
+ sameKey1, ManagedKeyState.ACTIVE, "metadata-A", 1000L);
+ ManagedKeyData sameManagedKey2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE,
+ sameKey2, ManagedKeyState.ACTIVE, "metadata-B", 2000L);
+
+ // Verify they have the same checksum
+ assertEquals(sameManagedKey1.getKeyChecksum(), sameManagedKey2.getKeyChecksum());
+
+    List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2);
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths);
+ when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(sameManagedKey1);
+ when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(sameManagedKey2);
+
+ // Execute
+ SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor);
+
+ // Verify - second key should overwrite first in the map due to same checksum
+ assertNotNull(cache);
+ assertSame(sameManagedKey1, cache.getLatestSystemKey()); // First is still latest
+
+ // The map should contain the second key for the shared checksum
+ ManagedKeyData retrievedKey = cache.getSystemKeyByChecksum(sameManagedKey1.getKeyChecksum());
+ assertSame(sameManagedKey2, retrievedKey); // Last one wins in TreeMap
+ }
+
+ @Test
+  public void testCreateCacheWithLoadSystemKeyRuntimeException() throws Exception {
+ when(mockAccessor.getAllSystemKeyFiles()).thenReturn(Arrays.asList(keyPath1));
+ when(mockAccessor.loadSystemKey(keyPath1)).thenThrow(new RuntimeException("Key load error"));
+
+ RuntimeException ex = assertThrows(RuntimeException.class, () -> {
+ SystemKeyCache.createCache(mockAccessor);
+ });
+    assertEquals("Key load error", ex.getMessage());
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index e78ca7d0cdb7..013648d41c4d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -38,6 +38,9 @@
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
@@ -116,6 +119,18 @@ public ChoreService getChoreService() {
return null;
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public CatalogJanitor getCatalogJanitor() {
return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index a25bae6ec7bd..b63bbbaac8be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -52,6 +52,9 @@
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
@@ -556,6 +559,18 @@ public ChoreService getChoreService() {
return null;
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public void updateRegionFavoredNodesMapping(String encodedRegionName,
List<HBaseProtos.ServerName> favoredNodes) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index fcb67ed31b47..ed11d69420ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -33,6 +33,9 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskGroup;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -327,5 +330,17 @@ public ClusterStatusTracker getClusterStatusTracker() {
public ActiveMasterManager getActiveMasterManager() {
return activeMasterManager;
}
+
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
new file mode 100644
index 000000000000..9304029aedf7
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assume.assumeTrue;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdminImpl;
+import org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runners.Suite;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestKeymetaAdminImpl.TestWhenDisabled.class,
+ TestKeymetaAdminImpl.TestAdminImpl.class,
+ TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class,
+})
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaAdminImpl {
+ private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+ private static final String CUST = "cust1";
+ private static final String ENCODED_CUST = ManagedKeyProvider.encodeToStr(CUST.getBytes());
+
+ @Rule
+ public TestName name = new TestName();
+
+ protected Configuration conf;
+ protected Path testRootDir;
+ protected FileSystem fs;
+
+ protected FileSystem mockFileSystem = mock(FileSystem.class);
+ protected Server mockServer = mock(Server.class);
+ protected KeymetaAdminImplForTest keymetaAdmin;
+ KeymetaTableAccessor keymetaAccessor = mock(KeymetaTableAccessor.class);
+
+ @Before
+ public void setUp() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ testRootDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+ fs = testRootDir.getFileSystem(conf);
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+
+ when(mockServer.getFileSystem()).thenReturn(mockFileSystem);
+ when(mockServer.getConfiguration()).thenReturn(conf);
+ keymetaAdmin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestWhenDisabled extends TestKeymetaAdminImpl {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestWhenDisabled.class);
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
+ }
+
+ @Test
+ public void testDisabled() throws Exception {
+ assertThrows(IOException.class,
+ () -> keymetaAdmin.enableKeyManagement(ManagedKeyData.KEY_GLOBAL_CUSTODIAN,
+ KEY_SPACE_GLOBAL));
+ assertThrows(IOException.class,
+ () -> keymetaAdmin.getManagedKeys(ManagedKeyData.KEY_GLOBAL_CUSTODIAN,
+ KEY_SPACE_GLOBAL));
+ }
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAdminImpl extends TestKeymetaAdminImpl {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAdminImpl.class);
+
+ @Parameter(0)
+ public String keySpace;
+ @Parameter(1)
+ public ManagedKeyState keyState;
+ @Parameter(2)
+ public boolean isNullKey;
+
+ @Parameters(name = "{index},keySpace={1},keyState={2}")
+ public static Collection data() {
+ return Arrays.asList(
+ new Object[][] {
+ { KEY_SPACE_GLOBAL, ACTIVE, false },
+ { "ns1", ACTIVE, false },
+ { KEY_SPACE_GLOBAL, FAILED, true },
+ { KEY_SPACE_GLOBAL, INACTIVE, false },
+ { KEY_SPACE_GLOBAL, DISABLED, true },
+ });
+ }
+
+ @Test
+ public void testEnableAndGet() throws Exception {
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getKeyProvider(conf);
+ managedKeyProvider.setMockedKeyState(CUST, keyState);
+ when(keymetaAccessor.getActiveKey(CUST.getBytes(), keySpace)).thenReturn(
+ managedKeyProvider.getManagedKey(CUST.getBytes(), keySpace));
+
+      List<ManagedKeyData> managedKeys =
+ keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace);
+ assertNotNull(managedKeys);
+ assertEquals(1, managedKeys.size());
+ assertEquals(keyState, managedKeys.get(0).getKeyState());
+ verify(keymetaAccessor).getActiveKey(CUST.getBytes(), keySpace);
+
+ keymetaAdmin.getManagedKeys(ENCODED_CUST, keySpace);
+ verify(keymetaAccessor).getAllKeys(CUST.getBytes(), keySpace);
+ }
+
+ @Test
+ public void testEnableKeyManagement() throws Exception {
+ assumeTrue(keyState == ACTIVE);
+      List<ManagedKeyData> keys = keymetaAdmin.enableKeyManagement(ENCODED_CUST, "namespace1");
+ assertEquals(1, keys.size());
+ assertEquals(ManagedKeyState.ACTIVE, keys.get(0).getKeyState());
+ assertEquals(ENCODED_CUST, keys.get(0).getKeyCustodianEncoded());
+ assertEquals("namespace1", keys.get(0).getKeyNamespace());
+
+ // Second call should return the same keys since our mock key provider returns the same key
+      List<ManagedKeyData> keys2 = keymetaAdmin.enableKeyManagement(ENCODED_CUST, "namespace1");
+ assertEquals(1, keys2.size());
+ assertEquals(keys.get(0), keys2.get(0));
+ }
+
+ @Test
+ public void testEnableKeyManagementWithMultipleNamespaces() throws Exception {
+      List<ManagedKeyData> keys = keymetaAdmin.enableKeyManagement(ENCODED_CUST, "namespace1");
+ assertEquals(1, keys.size());
+ assertEquals("namespace1", keys.get(0).getKeyNamespace());
+
+      List<ManagedKeyData> keys2 = keymetaAdmin.enableKeyManagement(ENCODED_CUST, "namespace2");
+ assertEquals(1, keys2.size());
+ assertEquals("namespace2", keys2.get(0).getKeyNamespace());
+ }
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestForKeyProviderNullReturn extends TestKeymetaAdminImpl {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestForKeyProviderNullReturn.class);
+
+ @Parameter(0)
+ public String keySpace;
+
+ @Parameters(name = "{index},keySpace={0}")
+    public static Collection<Object[]> data() {
+ return Arrays.asList(
+ new Object[][] {
+ { KEY_SPACE_GLOBAL },
+ { "ns1" },
+ });
+ }
+
+ @Test
+ public void test() throws Exception {
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getKeyProvider(conf);
+ String cust = "invalidcust1";
+ String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes());
+ managedKeyProvider.setMockedKey(cust, null, keySpace);
+ IOException ex = assertThrows(IOException.class,
+ () -> keymetaAdmin.enableKeyManagement(encodedCust, keySpace));
+ assertEquals("Invalid null managed key received from key provider", ex.getMessage());
+ }
+ }
+
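+  /**
+   * Test-only subclass of KeymetaAdminImpl that routes the accessor-facing methods to the
+   * enclosing suite's mocked KeymetaTableAccessor instead of a live keymeta table.
+   */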
+ private class KeymetaAdminImplForTest extends KeymetaAdminImpl {
+ public KeymetaAdminImplForTest(Server mockServer, KeymetaTableAccessor mockAccessor) {
+ super(mockServer);
+ }
+
+ @Override
+ public void addKey(ManagedKeyData keyData) throws IOException {
+ keymetaAccessor.addKey(keyData);
+ }
+
+ @Override
+    public List<ManagedKeyData> getAllKeys(byte[] key_cust, String keyNamespace)
+ throws IOException, KeyException {
+ return keymetaAccessor.getAllKeys(key_cust, keyNamespace);
+ }
+
+ @Override
+ public ManagedKeyData getActiveKey(byte[] key_cust, String keyNamespace)
+ throws IOException, KeyException {
+ return keymetaAccessor.getActiveKey(key_cust, keyNamespace);
+ }
+ }
+
+ protected boolean assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState,
+ Key expectedKey) {
+ assertNotNull(keyData);
+ assertEquals(expKeyState, keyData.getKeyState());
+ if (expectedKey == null) {
+ assertNull(keyData.getTheKey());
+    } else {
+      byte[] keyBytes = keyData.getTheKey().getEncoded();
+      byte[] expectedKeyBytes = expectedKey.getEncoded();
+      assertEquals(expectedKeyBytes.length, keyBytes.length);
+      assertEquals(new Bytes(expectedKeyBytes), new Bytes(keyBytes));
+ }
+ return true;
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
new file mode 100644
index 000000000000..d7045b245616
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
@@ -0,0 +1,521 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Key;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.IntStream;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runners.Suite;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+ TestSystemKeyAccessorAndManager.TestAccessorWhenDisabled.class,
+ TestSystemKeyAccessorAndManager.TestManagerWhenDisabled.class,
+ TestSystemKeyAccessorAndManager.TestAccessor.class,
+ TestSystemKeyAccessorAndManager.TestForInvalidFilenames.class,
+ TestSystemKeyAccessorAndManager.TestManagerForErrors.class,
+  TestSystemKeyAccessorAndManager.TestAccessorMisc.class
+})
+@Category({ MasterTests.class, SmallTests.class })
+public class TestSystemKeyAccessorAndManager {
+ private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+ @Rule
+ public TestName name = new TestName();
+
+ protected Configuration conf;
+ protected Path testRootDir;
+ protected FileSystem fs;
+
+ protected FileSystem mockFileSystem = mock(FileSystem.class);
+ protected MasterServices mockMaster = mock(MasterServices.class);
+ protected SystemKeyManager systemKeyManager;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ testRootDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+ fs = testRootDir.getFileSystem(conf);
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+
+ when(mockMaster.getFileSystem()).thenReturn(mockFileSystem);
+ when(mockMaster.getConfiguration()).thenReturn(conf);
+ systemKeyManager = new SystemKeyManager(mockMaster);
+ }
+
+ private static FileStatus createMockFile(String fileName) {
+ Path mockPath = mock(Path.class);
+ when(mockPath.getName()).thenReturn(fileName);
+ FileStatus mockFileStatus = mock(FileStatus.class);
+ when(mockFileStatus.getPath()).thenReturn(mockPath);
+ return mockFileStatus;
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAccessorWhenDisabled extends TestSystemKeyAccessorAndManager {
+ @ClassRule public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAccessorWhenDisabled.class);
+
+ @Override public void setUp() throws Exception {
+ super.setUp();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
+ }
+
+ @Test public void test() throws Exception {
+ assertNull(systemKeyManager.getAllSystemKeyFiles());
+ assertNull(systemKeyManager.getLatestSystemKeyFile().getFirst());
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestManagerWhenDisabled extends TestSystemKeyAccessorAndManager {
+ @ClassRule public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagerWhenDisabled.class);
+
+ @Override public void setUp() throws Exception {
+ super.setUp();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
+ }
+
+ @Test public void test() throws Exception {
+ systemKeyManager.ensureSystemKeyInitialized();
+ assertNull(systemKeyManager.rotateSystemKeyIfChanged());
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAccessor extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAccessor.class);
+
+ @Test
+ public void testGetLatestWithNone() throws Exception {
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
+
+ RuntimeException ex = assertThrows(RuntimeException.class,
+ () -> systemKeyManager.getLatestSystemKeyFile());
+ assertEquals("No cluster key initialized yet", ex.getMessage());
+ }
+
+ @Test
+ public void testGetWithSingle() throws Exception {
+ String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
+ FileStatus mockFileStatus = createMockFile(fileName);
+
+ Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
+ when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX+"*"))))
+ .thenReturn(new FileStatus[] { mockFileStatus });
+
+      List<Path> files = systemKeyManager.getAllSystemKeyFiles();
+ assertEquals(1, files.size());
+ assertEquals(fileName, files.get(0).getName());
+
+      Pair<Path, List<Path>> latestSystemKeyFileResult =
+        systemKeyManager.getLatestSystemKeyFile();
+ assertEquals(fileName, latestSystemKeyFileResult.getFirst().getName());
+
+ assertEquals(1, SystemKeyAccessor.extractSystemKeySeqNum(
+ latestSystemKeyFileResult.getFirst()));
+ }
+
+ @Test
+ public void testGetWithMultiple() throws Exception {
+ FileStatus[] mockFileStatuses = IntStream.rangeClosed(1, 3)
+ .mapToObj(i -> createMockFile(SYSTEM_KEY_FILE_PREFIX + i))
+ .toArray(FileStatus[]::new);
+
+ Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
+ when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX+"*"))))
+ .thenReturn(mockFileStatuses);
+
+      List<Path> files = systemKeyManager.getAllSystemKeyFiles();
+ assertEquals(3, files.size());
+
+      Pair<Path, List<Path>> latestSystemKeyFileResult =
+        systemKeyManager.getLatestSystemKeyFile();
+ assertEquals(3,
+ SystemKeyAccessor.extractSystemKeySeqNum(latestSystemKeyFileResult.getFirst()));
+ }
+
+ @Test
+ public void testExtractKeySequenceForInvalidFilename() throws Exception {
+ assertEquals(-1, SystemKeyAccessor.extractKeySequence(
+ createMockFile("abcd").getPath()));
+ }
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestForInvalidFilenames extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestForInvalidFilenames.class);
+
+ @Parameter(0)
+ public String fileName;
+ @Parameter(1)
+ public String expectedErrorMessage;
+
+ @Parameters(name = "{index},fileName={0}")
+    public static Collection<Object[]> data() {
+ return Arrays.asList(new Object[][] {
+ { "abcd", "Couldn't parse key file name: abcd" },
+ {SYSTEM_KEY_FILE_PREFIX+"abcd", "Couldn't parse key file name: "+
+ SYSTEM_KEY_FILE_PREFIX+"abcd"},
+ // Add more test cases here
+ });
+ }
+
+ @Test
+ public void test() throws Exception {
+ FileStatus mockFileStatus = createMockFile(fileName);
+
+ IOException ex = assertThrows(IOException.class,
+ () -> SystemKeyAccessor.extractSystemKeySeqNum(mockFileStatus.getPath()));
+ assertEquals(expectedErrorMessage, ex.getMessage());
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestManagerForErrors extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestManagerForErrors.class);
+
+ private static final String CLUSTER_ID = "clusterId";
+
+ @Mock
+    ManagedKeyProvider mockKeyProvider;
+ @Mock
+ MasterFileSystem masterFS;
+
+ private MockSystemKeyManager manager;
+ private AutoCloseable closeableMocks;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ closeableMocks = MockitoAnnotations.openMocks(this);
+
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
+ ClusterId clusterId = mock(ClusterId.class);
+ when(mockMaster.getMasterFileSystem()).thenReturn(masterFS);
+ when(masterFS.getClusterId()).thenReturn(clusterId);
+ when(clusterId.toString()).thenReturn(CLUSTER_ID);
+ when(masterFS.getFileSystem()).thenReturn(mockFileSystem);
+
+      manager = new MockSystemKeyManager(mockMaster, mockKeyProvider);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ closeableMocks.close();
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_WithNoSystemKeys() throws Exception {
+      when(mockKeyProvider.getSystemKey(any())).thenReturn(null);
+
+ IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized);
+ assertEquals("Failed to get system key for cluster id: " + CLUSTER_ID, ex.getMessage());
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_WithNoNonActiveKey() throws Exception {
+ String metadata = "key-metadata";
+ ManagedKeyData keyData = mock(ManagedKeyData.class);
+ when(keyData.getKeyState()).thenReturn(INACTIVE);
+ when(keyData.getKeyMetadata()).thenReturn(metadata);
+      when(mockKeyProvider.getSystemKey(any())).thenReturn(keyData);
+
+ IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized);
+ assertEquals("System key is expected to be ACTIVE but it is: INACTIVE for metadata: "
+ + metadata, ex.getMessage());
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_WithInvalidMetadata() throws Exception {
+ ManagedKeyData keyData = mock(ManagedKeyData.class);
+ when(keyData.getKeyState()).thenReturn(ACTIVE);
+      when(mockKeyProvider.getSystemKey(any())).thenReturn(keyData);
+
+ IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized);
+ assertEquals("System key is expected to have metadata but it is null", ex.getMessage());
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_WithSaveFailure() throws Exception {
+ String metadata = "key-metadata";
+ ManagedKeyData keyData = mock(ManagedKeyData.class);
+ when(keyData.getKeyState()).thenReturn(ACTIVE);
+      when(mockKeyProvider.getSystemKey(any())).thenReturn(keyData);
+ when(keyData.getKeyMetadata()).thenReturn(metadata);
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
+ Path rootDir = CommonFSUtils.getRootDir(conf);
+ when(masterFS.getTempDir()).thenReturn(rootDir);
+ FSDataOutputStream mockStream = mock(FSDataOutputStream.class);
+ when(mockFileSystem.create(any())).thenReturn(mockStream);
+ when(mockFileSystem.rename(any(), any())).thenReturn(false);
+
+ RuntimeException ex = assertThrows(RuntimeException.class,
+ manager::ensureSystemKeyInitialized);
+ assertEquals("Failed to generate or save System Key", ex.getMessage());
+ }
+
+ @Test
+ public void testEnsureSystemKeyInitialized_RaceCondition() throws Exception {
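+      // Simulates losing the race to another master: the rename of the temp key file fails,
+      // but a re-listing then finds the key file written by the winner, so initialization
+      // succeeds without error.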
+ String metadata = "key-metadata";
+ ManagedKeyData keyData = mock(ManagedKeyData.class);
+ when(keyData.getKeyState()).thenReturn(ACTIVE);
+      when(mockKeyProvider.getSystemKey(any())).thenReturn(keyData);
+ when(keyData.getKeyMetadata()).thenReturn(metadata);
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
+ Path rootDir = CommonFSUtils.getRootDir(conf);
+ when(masterFS.getTempDir()).thenReturn(rootDir);
+ FSDataOutputStream mockStream = mock(FSDataOutputStream.class);
+ when(mockFileSystem.create(any())).thenReturn(mockStream);
+ when(mockFileSystem.rename(any(), any())).thenReturn(false);
+ String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
+ FileStatus mockFileStatus = createMockFile(fileName);
+ when(mockFileSystem.globStatus(any())).thenReturn(
+ new FileStatus[0],
+ new FileStatus[] { mockFileStatus }
+ );
+
+ manager.ensureSystemKeyInitialized();
+ }
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAccessorMisc extends TestSystemKeyAccessorAndManager {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAccessorMisc.class);
+
+ @Test
+ public void testLoadSystemKeySuccess() throws Exception {
+ Path testPath = new Path("/test/key/path");
+ String testMetadata = "test-metadata";
+
+ // Create test key data
+ Key testKey = new SecretKeySpec("test-key-bytes".getBytes(), "AES");
+ ManagedKeyData testKeyData = new ManagedKeyData(
+ "custodian".getBytes(), "namespace", testKey,
+ ManagedKeyState.ACTIVE, testMetadata, 1000L);
+
+ // Mock key provider
+ ManagedKeyProvider realProvider = mock(ManagedKeyProvider.class);
+ when(realProvider.unwrapKey(testMetadata, null)).thenReturn(testKeyData);
+
+ // Create testable SystemKeyAccessor that overrides both loadKeyMetadata and getKeyProvider
+ SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) {
+ @Override
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ assertEquals(testPath, keyPath);
+ return testMetadata;
+ }
+
+ @Override
+ protected ManagedKeyProvider getKeyProvider() {
+ return realProvider;
+ }
+ };
+
+ ManagedKeyData result = testAccessor.loadSystemKey(testPath);
+ assertEquals(testKeyData, result);
+
+ // Verify the key provider was called correctly
+ verify(realProvider).unwrapKey(testMetadata, null);
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testLoadSystemKeyNullResult() throws Exception {
+ Path testPath = new Path("/test/key/path");
+ String testMetadata = "test-metadata";
+
+ // Mock key provider to return null
+ ManagedKeyProvider realProvider = mock(ManagedKeyProvider.class);
+ when(realProvider.unwrapKey(testMetadata, null)).thenReturn(null);
+
+ SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) {
+ @Override
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ assertEquals(testPath, keyPath);
+ return testMetadata;
+ }
+
+ @Override
+ protected ManagedKeyProvider getKeyProvider() {
+ return realProvider;
+ }
+ };
+
+ testAccessor.loadSystemKey(testPath);
+ }
+
+ @Test
+ public void testExtractSystemKeySeqNumValid() throws Exception {
+ Path testPath1 = new Path(SYSTEM_KEY_FILE_PREFIX + "1");
+ Path testPath123 = new Path(SYSTEM_KEY_FILE_PREFIX + "123");
+ Path testPathMax = new Path(SYSTEM_KEY_FILE_PREFIX + Integer.MAX_VALUE);
+
+ assertEquals(1, SystemKeyAccessor.extractSystemKeySeqNum(testPath1));
+ assertEquals(123, SystemKeyAccessor.extractSystemKeySeqNum(testPath123));
+ assertEquals(Integer.MAX_VALUE, SystemKeyAccessor.extractSystemKeySeqNum(testPathMax));
+ }
+
+ @Test(expected = IOException.class)
+ public void testGetAllSystemKeyFilesIOException() throws Exception {
+ when(mockFileSystem.globStatus(any())).thenThrow(new IOException("Filesystem error"));
+ systemKeyManager.getAllSystemKeyFiles();
+ }
+
+ @Test(expected = IOException.class)
+ public void testLoadSystemKeyIOExceptionFromMetadata() throws Exception {
+ Path testPath = new Path("/test/key/path");
+
+ SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) {
+ @Override
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ assertEquals(testPath, keyPath);
+ throw new IOException("Metadata read failed");
+ }
+
+ @Override
+ protected ManagedKeyProvider getKeyProvider() {
+ return mock(ManagedKeyProvider.class);
+ }
+ };
+
+ testAccessor.loadSystemKey(testPath);
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testLoadSystemKeyProviderException() throws Exception {
+ Path testPath = new Path("/test/key/path");
+ String testMetadata = "test-metadata";
+
+ SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) {
+ @Override
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ assertEquals(testPath, keyPath);
+ return testMetadata;
+ }
+
+ @Override
+ protected ManagedKeyProvider getKeyProvider() {
+ throw new RuntimeException("Key provider not available");
+ }
+ };
+
+ testAccessor.loadSystemKey(testPath);
+ }
+
+ @Test
+ public void testExtractSystemKeySeqNumBoundaryValues() throws Exception {
+ // Test boundary values
+ Path testPath0 = new Path(SYSTEM_KEY_FILE_PREFIX + "0");
+ Path testPathMin = new Path(SYSTEM_KEY_FILE_PREFIX + Integer.MIN_VALUE);
+
+ assertEquals(0, SystemKeyAccessor.extractSystemKeySeqNum(testPath0));
+ assertEquals(Integer.MIN_VALUE, SystemKeyAccessor.extractSystemKeySeqNum(testPathMin));
+ }
+
+ @Test
+ public void testExtractKeySequenceEdgeCases() throws Exception {
+ // Test various edge cases for extractKeySequence
+ Path validZero = new Path(SYSTEM_KEY_FILE_PREFIX + "0");
+ Path validNegative = new Path(SYSTEM_KEY_FILE_PREFIX + "-1");
+
+ // Valid cases should still work
+ assertEquals(0, SystemKeyAccessor.extractKeySequence(validZero));
+ assertEquals(-1, SystemKeyAccessor.extractKeySequence(validNegative));
+ }
+ }
+
+ private static class MockSystemKeyManager extends SystemKeyManager {
+ private final ManagedKeyProvider keyProvider;
+
+ public MockSystemKeyManager(MasterServices master, ManagedKeyProvider keyProvider)
+ throws IOException {
+ super(master);
+ this.keyProvider = keyProvider;
+ }
+
+ @Override
+ protected ManagedKeyProvider getKeyProvider() {
+ return keyProvider;
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
new file mode 100644
index 000000000000..a764a5b7de87
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.security.Key;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase;
+import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestSystemKeyManager extends ManagedKeyTestBase {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSystemKeyManager.class);
+
+ @Test
+ public void testSystemKeyInitializationAndRotation() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeyProvider keyProvider = Encryption.getKeyProvider(master.getConfiguration());
+ assertNotNull(keyProvider);
+ assertTrue(keyProvider instanceof ManagedKeyProvider);
+ assertTrue(keyProvider instanceof MockManagedKeyProvider);
+ MockManagedKeyProvider pbeKeyProvider = (MockManagedKeyProvider) keyProvider;
+ ManagedKeyData initialSystemKey = validateInitialState(master, pbeKeyProvider);
+
+ restartSystem();
+ master = TEST_UTIL.getHBaseCluster().getMaster();
+ validateInitialState(master, pbeKeyProvider);
+
+ // Test rotation of the cluster key by changing the key returned by the key provider and
+ // restarting the master.
+ String newAlias = "new_cluster_key";
+ pbeKeyProvider.setClusterKeyAlias(newAlias);
+ Key newClusterKey = MockManagedKeyProvider.generateSecretKey();
+ pbeKeyProvider.setMockedKey(newAlias, newClusterKey, ManagedKeyData.KEY_SPACE_GLOBAL);
+
+ restartSystem();
+ master = TEST_UTIL.getHBaseCluster().getMaster();
+ SystemKeyAccessor systemKeyAccessor = new SystemKeyAccessor(master);
+ assertEquals(2, systemKeyAccessor.getAllSystemKeyFiles().size());
+ SystemKeyCache systemKeyCache = master.getSystemKeyCache();
+ assertEquals(0, Bytes.compareTo(newClusterKey.getEncoded(),
+ systemKeyCache.getLatestSystemKey().getTheKey().getEncoded()));
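+ // The original system key file should still load back as the initial key and remain
+ // retrievable from the cache by its checksum.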
+ assertEquals(initialSystemKey,
+ systemKeyAccessor.loadSystemKey(systemKeyAccessor.getAllSystemKeyFiles().get(1)));
+ assertEquals(initialSystemKey,
+ systemKeyCache.getSystemKeyByChecksum(initialSystemKey.getKeyChecksum()));
+ }
+
+ @Test
+ public void testWithInvalidSystemKey() throws Exception {
+ HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ KeyProvider keyProvider = Encryption.getKeyProvider(master.getConfiguration());
+ MockManagedKeyProvider pbeKeyProvider = (MockManagedKeyProvider) keyProvider;
+
+ // Test startup failure when the cluster key is INACTIVE
+ SystemKeyManager tmpCKM = new SystemKeyManager(master);
+ tmpCKM.ensureSystemKeyInitialized();
+ pbeKeyProvider.setMockedKeyState(pbeKeyProvider.getSystemKeyAlias(), ManagedKeyState.INACTIVE);
+ assertThrows(IOException.class, tmpCKM::ensureSystemKeyInitialized);
+ }
+
+ private ManagedKeyData validateInitialState(HMaster master, MockManagedKeyProvider pbeKeyProvider)
+ throws IOException {
+ SystemKeyAccessor systemKeyAccessor = new SystemKeyAccessor(master);
+ assertEquals(1, systemKeyAccessor.getAllSystemKeyFiles().size());
+ SystemKeyCache systemKeyCache = master.getSystemKeyCache();
+ assertNotNull(systemKeyCache);
+ ManagedKeyData clusterKey = systemKeyCache.getLatestSystemKey();
+ assertEquals(pbeKeyProvider.getSystemKey(master.getClusterId().getBytes()), clusterKey);
+ assertEquals(clusterKey,
+ systemKeyCache.getSystemKeyByChecksum(clusterKey.getKeyChecksum()));
+ return clusterKey;
+ }
+
+ private void restartSystem() throws Exception {
+ TEST_UTIL.shutdownMiniHBaseCluster();
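+ // Brief pause to let the mini cluster fully shut down before restarting.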
+ Thread.sleep(2000);
+ TEST_UTIL.restartHBaseCluster(1);
+ TEST_UTIL.waitFor(60000,
+ () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index da1bc04d7e03..ae507f32fd58 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -38,6 +38,9 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
@@ -215,6 +218,18 @@ public Connection getConnection() {
}
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public FileSystem getFileSystem() {
try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
index 3c55696080e3..18b7744e17cb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
@@ -24,6 +24,9 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStorePerformanceEvaluation;
@@ -57,6 +60,18 @@ public Configuration getConfiguration() {
public ServerName getServerName() {
return serverName;
}
+
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
}
private MasterRegion region;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 443019bee808..6ed289ab96d1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -46,6 +46,9 @@
import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -837,6 +840,18 @@ public ChoreService getChoreService() {
return null;
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public FileSystem getFileSystem() {
return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 58ffdcf91d43..adc420409527 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -55,6 +55,9 @@
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.ipc.SimpleRpcServer;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.security.SecurityInfo;
@@ -350,6 +353,18 @@ public ChoreService getChoreService() {
return null;
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public Connection createConnection(Configuration conf) throws IOException {
return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 90f4a7555b93..9257b78d6ce7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -26,6 +26,9 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.slf4j.Logger;
@@ -100,6 +103,18 @@ public ChoreService getChoreService() {
throw new UnsupportedOperationException();
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public FileSystem getFileSystem() {
throw new UnsupportedOperationException();
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index a9b35ed1de21..9b24e5caa973 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -59,6 +59,10 @@ def rsgroup_admin
::Hbase::RSGroupAdmin.new(self.connection)
end
+ def keymeta_admin
+ ::Hbase::KeymetaAdmin.new(@connection)
+ end
+
def taskmonitor
::Hbase::TaskMonitor.new(configuration)
end
diff --git a/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb b/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb
new file mode 100644
index 000000000000..f70abbdde55b
--- /dev/null
+++ b/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb
@@ -0,0 +1,56 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'java'
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyData
+java_import org.apache.hadoop.hbase.keymeta.KeymetaAdminClient
+
+module Hbase
+ # KeymetaAdmin provides a Ruby interface to the HBase Key Management API.
+ class KeymetaAdmin
+ def initialize(connection)
+ @connection = connection
+ @admin = KeymetaAdminClient.new(connection)
+ @hb_admin = @connection.getAdmin
+ end
+
+ def close
+ @admin.close
+ end
+
+ def enable_key_management(key_info)
+ cust, namespace = extract_cust_info(key_info)
+ @admin.enableKeyManagement(cust, namespace)
+ end
+
+ def get_key_statuses(key_info)
+ cust, namespace = extract_cust_info(key_info)
+ @admin.getManagedKeys(cust, namespace)
+ end
+
+ def extract_cust_info(key_info)
+ cust_info = key_info.split(':')
+ raise(ArgumentError, 'Invalid cust:namespace format') unless [1, 2].include?(cust_info.length)
+
+ [cust_info[0], cust_info.length > 1 ? cust_info[1] : ManagedKeyData::KEY_SPACE_GLOBAL]
+ end
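+
+ # Illustrative only (hypothetical values): extract_cust_info splits its argument on ':',
+ # defaulting the namespace to the global key space:
+ #   extract_cust_info('Y3VzdDE=:ns1') #=> ['Y3VzdDE=', 'ns1']
+ #   extract_cust_info('Y3VzdDE=')     #=> ['Y3VzdDE=', ManagedKeyData::KEY_SPACE_GLOBAL]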
+ end
+end
diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb
index d4df1f8f5821..67892e5538c0 100644
--- a/hbase-shell/src/main/ruby/hbase_constants.rb
+++ b/hbase-shell/src/main/ruby/hbase_constants.rb
@@ -138,3 +138,4 @@ def self.promote_constants(constants)
require 'hbase/security'
require 'hbase/visibility_labels'
require 'hbase/rsgroup_admin'
+require 'hbase/keymeta_admin'
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 46b38dd96b89..10f24c4a0d24 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -150,6 +150,10 @@ def hbase_rsgroup_admin
@rsgroup_admin ||= hbase.rsgroup_admin
end
+ def hbase_keymeta_admin
+ @keymeta_admin ||= hbase.keymeta_admin
+ end
+
##
# Create singleton methods on the target receiver object for all the loaded commands
#
@@ -615,6 +619,18 @@ def self.exception_handler(hide_traceback)
]
)
+Shell.load_command_group(
+ 'keymeta',
+ full_name: 'Keymeta',
+ comment: "NOTE: The KeyMeta Coprocessor Endpoint must be enabled on the Master else commands fail
+ with: UnknownProtocolException: No registered Master Coprocessor Endpoint found for
+ ManagedKeysService",
+ commands: %w[
+ enable_key_management
+ show_key_status
+ ]
+)
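+# Illustrative only (not part of this patch): the endpoint above is assumed to be provided by the
+# KeymetaServiceEndpoint master coprocessor, e.g. registered in hbase-site.xml via:
+#   <property>
+#     <name>hbase.coprocessor.master.classes</name>
+#     <value>org.apache.hadoop.hbase.keymeta.KeymetaServiceEndpoint</value>
+#   </property>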
+
Shell.load_command_group(
'rsgroup',
full_name: 'RSGroups',
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index a40f737e7908..a97dddc4e6a0 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -105,6 +105,10 @@ def rsgroup_admin
@shell.hbase_rsgroup_admin
end
+ def keymeta_admin
+ @shell.hbase_keymeta_admin
+ end
+
#----------------------------------------------------------------------
# Creates formatter instance first time and then reuses it.
def formatter
diff --git a/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb b/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb
new file mode 100644
index 000000000000..9a6d0422ad4e
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # EnableKeyManagement provides a Ruby interface for enabling key management via the
+ # HBase Key Management API.
+ class EnableKeyManagement < KeymetaCommandBase
+ def help
+ <<-EOF
+Enable key management for a given cust:namespace (cust in Base64 format).
+If no namespace is specified, the global namespace (*) is used.
+
+Example:
+ hbase> enable_key_management 'cust:namespace'
+ hbase> enable_key_management 'cust'
+ EOF
+ end
+
+ def command(key_info)
+ statuses = keymeta_admin.enable_key_management(key_info)
+ print_key_statuses(statuses)
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb
new file mode 100644
index 000000000000..e2af5f524cc3
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+module Shell
+ module Commands
+ # KeymetaCommandBase is a base class for all key management commands.
+ class KeymetaCommandBase < Command
+ def print_key_statuses(statuses)
+ formatter.header(%w[ENCODED-KEY NAMESPACE STATUS METADATA METADATA-HASH REFRESH-TIMESTAMP])
+ statuses.each { |status| formatter.row(format_status_row(status)) }
+ formatter.footer(statuses.size)
+ end
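+
+ # Example output row (values illustrative):
+ #   Y3VzdDE=  ns1  ACTIVE  {...metadata JSON...}  <base64-md5>  1734300000000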
+
+ private
+
+ def format_status_row(status)
+ [
+ status.getKeyCustodianEncoded,
+ status.getKeyNamespace,
+ status.getKeyState.toString,
+ status.getKeyMetadata,
+ status.getKeyMetadataHashEncoded,
+ status.getRefreshTimestamp
+ ]
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb b/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb
new file mode 100644
index 000000000000..d3670d094ed3
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# frozen_string_literal: true
+
+require 'shell/commands/keymeta_command_base'
+
+module Shell
+ module Commands
+ # ShowKeyStatus provides a Ruby interface for showing key statuses via the
+ # HBase Key Management API.
+ class ShowKeyStatus < KeymetaCommandBase
+ def help
+ <<-EOF
+Show key statuses for a given cust:namespace (cust in Base64 format).
+If no namespace is specified, the global namespace (*) is used.
+
+Example:
+ hbase> show_key_status 'cust:namespace'
+ hbase> show_key_status 'cust'
+ EOF
+ end
+
+ def command(key_info)
+ statuses = keymeta_admin.get_key_statuses(key_info)
+ print_key_statuses(statuses)
+ end
+ end
+ end
+end
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index dc4bc1816acc..659bfe34067a 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -89,6 +89,7 @@
import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdminClient;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
@@ -201,6 +202,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/** This is for unit tests parameterized with a single boolean. */
public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
+ private Admin hbaseAdmin = null;
+
/**
* Checks to see if a specific port is available.
* @param port the port number to check for availability
@@ -2942,7 +2945,9 @@ public Admin getAdmin() throws IOException {
return hbaseAdmin;
}
- private Admin hbaseAdmin = null;
+ public KeymetaAdminClient getKeymetaAdmin() throws IOException {
+ return new KeymetaAdminClient(getConnection());
+ }
/**
* Returns an {@link Hbck} instance. Needs be closed when done.
From 281dc166c2ccab169f1de95d8b87f746f1363d0a Mon Sep 17 00:00:00 2001
From: Hari Krishna Dara
Date: Fri, 12 Sep 2025 23:48:10 +0530
Subject: [PATCH 2/9] Fix compilation error in HBASE-29368 feature branch
(#7298)
---
.../org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java | 1 +
1 file changed, 1 insertion(+)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
index c44e7d45061b..f7afa7ee5891 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
@@ -223,6 +223,7 @@ public void testGenericCacheForNonExistentKey() throws Exception {
verify(testProvider).unwrapKey(any(String.class), any());
}
+ @Test
public void testWithInvalidProvider() throws Exception {
ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
doThrow(new IOException("Test exception")).when(testProvider).unwrapKey(any(String.class),
From d6da6fd0eaa08bc940e1a2e2a46e3b4fb231e7bc Mon Sep 17 00:00:00 2001
From: Viraj Jasani
Date: Sat, 20 Sep 2025 14:43:53 -0700
Subject: [PATCH 3/9] spotless fixes
Co-authored-by: Hari Dara
---
.../hbase/keymeta/KeymetaAdminClient.java | 23 ++--
.../hadoop/hbase/security/EncryptionUtil.java | 16 +--
.../org/apache/hadoop/hbase/HConstants.java | 12 +-
.../hadoop/hbase/io/crypto/Encryption.java | 14 +-
.../hbase/io/crypto/KeyStoreKeyProvider.java | 2 +-
.../hbase/io/crypto/ManagedKeyData.java | 80 ++++--------
.../hbase/io/crypto/ManagedKeyProvider.java | 29 ++---
.../hbase/io/crypto/ManagedKeyState.java | 6 +-
.../io/crypto/ManagedKeyStoreKeyProvider.java | 26 ++--
.../hadoop/hbase/keymeta/KeymetaAdmin.java | 12 +-
.../org/apache/hadoop/hbase/util/Bytes.java | 2 -
.../apache/hadoop/hbase/util/GsonUtil.java | 1 -
.../io/crypto/MockManagedKeyProvider.java | 37 +++---
.../io/crypto/TestKeyStoreKeyProvider.java | 12 +-
.../hbase/io/crypto/TestManagedKeyData.java | 23 ++--
.../io/crypto/TestManagedKeyProvider.java | 85 ++++++------
.../hbase/MockRegionServerServices.java | 9 +-
.../java/org/apache/hadoop/hbase/Server.java | 14 +-
.../hbase/keymeta/KeyManagementBase.java | 48 ++++---
.../hbase/keymeta/KeymetaAdminImpl.java | 11 +-
.../hbase/keymeta/KeymetaMasterService.java | 15 +--
.../hbase/keymeta/KeymetaServiceEndpoint.java | 38 +++---
.../hbase/keymeta/KeymetaTableAccessor.java | 91 ++++++-------
.../hbase/keymeta/ManagedKeyDataCache.java | 88 ++++++-------
.../hbase/keymeta/SystemKeyAccessor.java | 24 ++--
.../hadoop/hbase/keymeta/SystemKeyCache.java | 3 +-
.../hadoop/hbase/master/SplitWALManager.java | 1 +
.../hadoop/hbase/master/SystemKeyManager.java | 53 ++++----
.../regionserver/ReplicationSyncUp.java | 9 +-
.../hbase/keymeta/DummyKeyProvider.java | 3 +-
.../ManagedKeyProviderInterceptor.java | 3 +-
.../hbase/keymeta/ManagedKeyTestBase.java | 7 +-
.../hbase/keymeta/TestKeyManagementBase.java | 8 +-
.../hbase/keymeta/TestKeymetaEndpoint.java | 65 +++++-----
.../keymeta/TestKeymetaMasterService.java | 3 +-
.../keymeta/TestKeymetaTableAccessor.java | 41 +++---
.../keymeta/TestManagedKeyDataCache.java | 121 ++++++++----------
.../hbase/keymeta/TestManagedKeymeta.java | 21 ++-
.../hbase/keymeta/TestSystemKeyCache.java | 23 ++--
.../hbase/master/MockNoopMasterServices.java | 9 +-
.../hadoop/hbase/master/MockRegionServer.java | 9 +-
.../hbase/master/TestActiveMasterManager.java | 9 +-
.../hbase/master/TestKeymetaAdminImpl.java | 44 ++-----
.../TestSystemKeyAccessorAndManager.java | 71 +++++-----
.../hbase/master/TestSystemKeyManager.java | 7 +-
.../cleaner/TestReplicationHFileCleaner.java | 9 +-
...onProcedureStorePerformanceEvaluation.java | 9 +-
.../regionserver/TestHeapMemoryManager.java | 9 +-
.../token/TestTokenAuthentication.java | 9 +-
.../apache/hadoop/hbase/util/MockServer.java | 9 +-
50 files changed, 567 insertions(+), 706 deletions(-)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
index 8092dee1fc37..e72e3c978ada 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
@@ -21,7 +21,6 @@
import java.security.KeyException;
import java.util.ArrayList;
import java.util.List;
-
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
@@ -42,13 +41,13 @@ public class KeymetaAdminClient implements KeymetaAdmin {
private ManagedKeysProtos.ManagedKeysService.BlockingInterface stub;
public KeymetaAdminClient(Connection conn) throws IOException {
- this.stub = ManagedKeysProtos.ManagedKeysService.newBlockingStub(
- conn.getAdmin().coprocessorService());
+ this.stub =
+ ManagedKeysProtos.ManagedKeysService.newBlockingStub(conn.getAdmin().coprocessorService());
}
@Override
public List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
- throws IOException {
+ throws IOException {
try {
ManagedKeysProtos.GetManagedKeysResponse response = stub.enableKeyManagement(null,
ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build());
@@ -70,16 +69,14 @@ public List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
}
}
- private static List<ManagedKeyData> generateKeyDataList(
- ManagedKeysProtos.GetManagedKeysResponse stateResponse) {
+ private static List<ManagedKeyData>
+ generateKeyDataList(ManagedKeysProtos.GetManagedKeysResponse stateResponse) {
List<ManagedKeyData> keyStates = new ArrayList<>();
- for (ManagedKeysResponse state: stateResponse.getStateList()) {
- keyStates.add(new ManagedKeyData(
- state.getKeyCustBytes().toByteArray(),
- state.getKeyNamespace(), null,
- ManagedKeyState.forValue((byte) state.getKeyState().getNumber()),
- state.getKeyMetadata(),
- state.getRefreshTimestamp()));
+ for (ManagedKeysResponse state : stateResponse.getStateList()) {
+ keyStates
+ .add(new ManagedKeyData(state.getKeyCustBytes().toByteArray(), state.getKeyNamespace(),
+ null, ManagedKeyState.forValue((byte) state.getKeyState().getNumber()),
+ state.getKeyMetadata(), state.getRefreshTimestamp()));
}
return keyStates;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 91630215e75d..b06ca9ce0d1e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -94,7 +94,7 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws
* @return the encrypted key bytes
*/
public static byte[] wrapKey(Configuration conf, String subject, Key key, Key kek)
- throws IOException {
+ throws IOException {
// Wrap the key with the configured encryption algorithm.
String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Cipher cipher = Encryption.getCipher(conf, algorithm);
@@ -117,8 +117,7 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key, Key ke
ByteArrayOutputStream out = new ByteArrayOutputStream();
if (kek != null) {
Encryption.encryptWithGivenKey(kek, out, new ByteArrayInputStream(keyBytes), cipher, iv);
- }
- else {
+ } else {
Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf,
cipher, iv);
}
@@ -149,7 +148,7 @@ public static Key unwrapKey(Configuration conf, String subject, byte[] value)
* @param conf configuration
* @param subject subject key alias
* @param value the encrypted key bytes
- * @param kek the key encryption key
+ * @param kek the key encryption key
* @return the raw key bytes
*/
public static Key unwrapKey(Configuration conf, String subject, byte[] value, Key kek)
@@ -165,8 +164,8 @@ public static Key unwrapKey(Configuration conf, String subject, byte[] value, Ke
}
private static Key getUnwrapKey(Configuration conf, String subject,
- EncryptionProtos.WrappedKey wrappedKey, Cipher cipher, Key kek)
- throws IOException, KeyException {
+ EncryptionProtos.WrappedKey wrappedKey, Cipher cipher, Key kek)
+ throws IOException, KeyException {
String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf);
String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim();
if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) {
@@ -181,9 +180,8 @@ private static Key getUnwrapKey(Configuration conf, String subject,
byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null;
if (kek != null) {
Encryption.decryptWithGivenKey(kek, out, wrappedKey.getData().newInput(),
- wrappedKey.getLength(), cipher, iv);
- }
- else {
+ wrappedKey.getLength(), cipher, iv);
+ } else {
Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(),
subject, conf, cipher, iv);
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index a8399941d6f4..b9dfa9afc5d8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1310,8 +1310,10 @@ public enum OperationStatusCode {
/** Configuration key for enabling WAL encryption, a boolean */
public static final String ENABLE_WAL_ENCRYPTION = "hbase.regionserver.wal.encryption";
- /** Property used by ManagedKeyStoreKeyProvider class to set the alias that identifies
- * the current system key. */
+ /**
+ * Property used by ManagedKeyStoreKeyProvider class to set the alias that identifies the current
+ * system key.
+ */
public static final String CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY =
"hbase.crypto.managed_key_store.system.key.name";
public static final String CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX =
@@ -1322,8 +1324,10 @@ public enum OperationStatusCode {
"hbase.crypto.managed_keys.enabled";
public static final boolean CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED = false;
- /** Enables or disables key lookup during data path as an alternative to static injection of keys
- * using control path. */
+ /**
+ * Enables or disables key lookup during data path as an alternative to static injection of keys
+ * using control path.
+ */
public static final String CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY =
"hbase.crypto.managed_keys.dynamic_lookup.enabled";
public static final boolean CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_DEFAULT_ENABLED = true;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 336c440c4493..a176a4329422 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -33,7 +33,6 @@
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -481,8 +480,8 @@ public static void encryptWithSubjectKey(OutputStream out, InputStream in, Strin
* @param cipher the encryption algorithm
* @param iv the initialization vector, can be null
*/
- public static void encryptWithGivenKey(Key key, OutputStream out, InputStream in,
- Cipher cipher, byte[] iv) throws IOException {
+ public static void encryptWithGivenKey(Key key, OutputStream out, InputStream in, Cipher cipher,
+ byte[] iv) throws IOException {
Encryptor e = cipher.getEncryptor();
e.setKey(key);
e.setIv(iv); // can be null
@@ -513,8 +512,8 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o
String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY);
if (alternateAlgorithm != null) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Unable to decrypt data with current cipher algorithm '" + conf.get(
- HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
+ LOG.debug("Unable to decrypt data with current cipher algorithm '"
+ + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
+ "'. Trying with the alternate cipher algorithm '" + alternateAlgorithm
+ "' configured.");
}
@@ -523,15 +522,14 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o
throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available");
}
decryptWithGivenKey(key, out, in, outLen, alterCipher, iv);
- }
- else {
+ } else {
throw e;
}
}
}
public static void decryptWithGivenKey(Key key, OutputStream out, InputStream in, int outLen,
- Cipher cipher, byte[] iv) throws IOException {
+ Cipher cipher, byte[] iv) throws IOException {
Decryptor d = cipher.getDecryptor();
d.setKey(key);
d.setIv(iv); // can be null
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
index c401d3b3f6b9..f79ae100ebc9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
@@ -180,7 +180,7 @@ public Key getKey(String alias) {
} catch (UnrecoverableKeyException e) {
try {
return store.getKey(alias, getAliasPassword(alias));
- } catch (UnrecoverableKeyException|NoSuchAlgorithmException|KeyStoreException e2) {
+ } catch (UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException e2) {
// Ignore.
}
throw new RuntimeException(e);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
index ca8d55f97faa..e9c00935d38e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
@@ -24,7 +24,6 @@
import java.util.Base64;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
-
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.DataChecksum;
import org.apache.yetus.audience.InterfaceAudience;
@@ -35,21 +34,17 @@
* This class represents an encryption key data which includes the key itself, its state, metadata
* and a prefix. The metadata encodes enough information on the key such that it can be used to
* retrieve the exact same key again in the future. If the key state is
- * {@link ManagedKeyState#FAILED} expect the key to be {@code null}.
- *
- * The key data is represented by the following fields:
+ * {@link ManagedKeyState#FAILED}, expect the key to be {@code null}. The key data is represented by
+ * the following fields:
*
* key_cust: The prefix for which this key belongs to
* theKey: The key capturing the bytes and encoding
* keyState: The state of the key (see {@link ManagedKeyState})
* keyMetadata: Metadata that identifies the key
*
- *
- * The class provides methods to retrieve, as well as to compute a checksum
- * for the key data. The checksum is used to ensure the integrity of the key data.
- *
- * The class also provides a method to generate an MD5 hash of the key metadata, which can be used
- * for validation and identification.
+ * The class provides accessors for these fields, as well as a method to compute a checksum for
+ * the key data. The checksum is used to ensure the integrity of the key data. The class also
+ * provides a method to generate an MD5 hash of the key metadata, which can be used for validation
+ * and identification.
*/
@InterfaceAudience.Public
public class ManagedKeyData {
@@ -76,34 +71,32 @@ public class ManagedKeyData {
/**
* Constructs a new instance with the given parameters.
- *
- * @param key_cust The key custodian.
- * @param theKey The actual key, can be {@code null}.
+ * @param key_cust The key custodian.
+ * @param theKey The actual key, can be {@code null}.
* @param keyState The state of the key.
- * @param keyMetadata The metadata associated with the key.
+ * @param keyMetadata The metadata associated with the key.
* @throws NullPointerException if any of key_cust, keyState or keyMetadata is null.
*/
public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, ManagedKeyState keyState,
- String keyMetadata) {
+ String keyMetadata) {
this(key_cust, key_namespace, theKey, keyState, keyMetadata,
- EnvironmentEdgeManager.currentTime());
+ EnvironmentEdgeManager.currentTime());
}
/**
* Constructs a new instance with the given parameters including refresh timestamp.
- *
- * @param key_cust The key custodian.
- * @param theKey The actual key, can be {@code null}.
- * @param keyState The state of the key.
- * @param keyMetadata The metadata associated with the key.
+ * @param key_cust The key custodian.
+ * @param theKey The actual key, can be {@code null}.
+ * @param keyState The state of the key.
+ * @param keyMetadata The metadata associated with the key.
* @param refreshTimestamp The refresh timestamp for the key.
* @throws NullPointerException if any of key_cust, keyState or keyMetadata is null.
*/
public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, ManagedKeyState keyState,
- String keyMetadata, long refreshTimestamp) {
+ String keyMetadata, long refreshTimestamp) {
Preconditions.checkNotNull(key_cust, "key_cust should not be null");
Preconditions.checkNotNull(key_namespace, "key_namespace should not be null");
- Preconditions.checkNotNull(keyState, "keyState should not be null");
+ Preconditions.checkNotNull(keyState, "keyState should not be null");
// Only check for null metadata if state is not FAILED
if (keyState != ManagedKeyState.FAILED) {
Preconditions.checkNotNull(keyMetadata, "keyMetadata should not be null");
@@ -120,12 +113,11 @@ public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, Managed
@InterfaceAudience.Private
public ManagedKeyData cloneWithoutKey() {
return new ManagedKeyData(keyCustodian, keyNamespace, null, keyState, keyMetadata,
- refreshTimestamp);
+ refreshTimestamp);
}
/**
* Returns the custodian associated with the key.
- *
* @return The key custodian as a byte array.
*/
public byte[] getKeyCustodian() {
@@ -140,10 +132,8 @@ public String getKeyCustodianEncoded() {
return Base64.getEncoder().encodeToString(keyCustodian);
}
-
/**
* Returns the namespace associated with the key.
- *
* @return The namespace as a {@code String}.
*/
public String getKeyNamespace() {
@@ -152,7 +142,6 @@ public String getKeyNamespace() {
/**
* Returns the actual key.
- *
* @return The key as a {@code Key} object.
*/
public Key getTheKey() {
@@ -161,7 +150,6 @@ public Key getTheKey() {
/**
* Returns the state of the key.
- *
* @return The key state as a {@code ManagedKeyState} enum value.
*/
public ManagedKeyState getKeyState() {
@@ -170,7 +158,6 @@ public ManagedKeyState getKeyState() {
/**
* Returns the metadata associated with the key.
- *
* @return The key metadata as a {@code String}.
*/
public String getKeyMetadata() {
@@ -179,7 +166,6 @@ public String getKeyMetadata() {
/**
* Returns the refresh timestamp of the key.
- *
* @return The refresh timestamp as a long value.
*/
public long getRefreshTimestamp() {
@@ -188,20 +174,14 @@ public long getRefreshTimestamp() {
@Override
public String toString() {
- return "ManagedKeyData{" +
- "keyCustodian=" + Arrays.toString(keyCustodian) +
- ", keyNamespace='" + keyNamespace + '\'' +
- ", keyState=" + keyState +
- ", keyMetadata='" + keyMetadata + '\'' +
- ", refreshTimestamp=" + refreshTimestamp +
- ", keyChecksum=" + getKeyChecksum() +
- '}';
+ return "ManagedKeyData{" + "keyCustodian=" + Arrays.toString(keyCustodian) + ", keyNamespace='"
+ + keyNamespace + '\'' + ", keyState=" + keyState + ", keyMetadata='" + keyMetadata + '\''
+ + ", refreshTimestamp=" + refreshTimestamp + ", keyChecksum=" + getKeyChecksum() + '}';
}
/**
* Computes the checksum of the key. If the checksum has already been computed, this method
* returns the previously computed value. The checksum is computed using the CRC32C algorithm.
- *
* @return The checksum of the key as a long value, {@code 0} if no key is available.
*/
public long getKeyChecksum() {
@@ -223,7 +203,6 @@ public static long constructKeyChecksum(byte[] data) {
/**
* Computes the hash of the key metadata. If the hash has already been computed, this method
* returns the previously computed value. The hash is computed using the MD5 algorithm.
- *
* @return The hash of the key metadata as a byte array.
*/
public byte[] getKeyMetadataHash() {
@@ -267,23 +246,14 @@ public boolean equals(Object o) {
ManagedKeyData that = (ManagedKeyData) o;
- return new EqualsBuilder()
- .append(keyCustodian, that.keyCustodian)
- .append(keyNamespace, that.keyNamespace)
- .append(theKey, that.theKey)
- .append(keyState, that.keyState)
- .append(keyMetadata, that.keyMetadata)
- .isEquals();
+ return new EqualsBuilder().append(keyCustodian, that.keyCustodian)
+ .append(keyNamespace, that.keyNamespace).append(theKey, that.theKey)
+ .append(keyState, that.keyState).append(keyMetadata, that.keyMetadata).isEquals();
}
@Override
public int hashCode() {
- return new HashCodeBuilder(17, 37)
- .append(keyCustodian)
- .append(keyNamespace)
- .append(theKey)
- .append(keyState)
- .append(keyMetadata)
- .toHashCode();
+ return new HashCodeBuilder(17, 37).append(keyCustodian).append(keyNamespace).append(theKey)
+ .append(keyState).append(keyMetadata).toHashCode();
}
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
index 27cd91380d6e..512f78a1f9f5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
@@ -17,32 +17,27 @@
*/
package org.apache.hadoop.hbase.io.crypto;
+import edu.umd.cs.findbugs.annotations.NonNull;
import java.io.IOException;
import java.util.Base64;
-import edu.umd.cs.findbugs.annotations.NonNull;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.yetus.audience.InterfaceAudience;
/**
- * Interface for key providers of managed keys. Defines methods for generating and managing
- * managed keys, as well as handling key storage and retrieval.
- *
- * The interface extends the basic {@link KeyProvider} interface with additional
- * methods for working with managed keys.
+ * Interface for key providers of managed keys. Defines methods for generating and managing managed
+ * keys, as well as handling key storage and retrieval. The interface extends the basic
+ * {@link KeyProvider} interface with additional methods for working with managed keys.
*/
@InterfaceAudience.Public
public interface ManagedKeyProvider extends KeyProvider {
/**
* Initialize the provider with the given configuration.
- *
* @param conf Hadoop configuration
*/
void initConfig(Configuration conf);
/**
* Retrieve the system key using the given system identifier.
- *
* @param systemId system identifier
* @return ManagedKeyData for the system key and is guaranteed to be not {@code null}
* @throws IOException if an error occurs while retrieving the key
@@ -51,8 +46,7 @@ public interface ManagedKeyProvider extends KeyProvider {
/**
* Retrieve a managed key for the specified prefix.
- *
- * @param key_cust The key custodian.
+ * @param key_cust The key custodian.
* @param key_namespace Key namespace
* @return ManagedKeyData for the system key and is expected to be not {@code null}
* @throws IOException if an error occurs while retrieving the key
@@ -64,14 +58,14 @@ public interface ManagedKeyProvider extends KeyProvider {
* same key provider via the {@link #getSystemKey(byte[])} or
* {@link #getManagedKey(byte[], String)} methods. If key couldn't be retrieved using metadata and
* the wrappedKey is provided, the implementation may try to decrypt it as a fallback operation.
- *
* @param keyMetaData Key metadata, must not be {@code null}.
- * @param wrappedKey The DEK key material encrypted with the corresponding KEK, if available.
+ * @param wrappedKey The DEK key material encrypted with the corresponding KEK, if available.
* @return ManagedKeyData for the key represented by the metadata and is expected to be not
* {@code null}
* @throws IOException if an error occurs while generating the key
*/
- @NonNull ManagedKeyData unwrapKey(String keyMetaData, byte[] wrappedKey) throws IOException;
+ @NonNull
+ ManagedKeyData unwrapKey(String keyMetaData, byte[] wrappedKey) throws IOException;
/**
* Decode the given key custodian which is encoded as Base64 string.
@@ -83,10 +77,9 @@ static byte[] decodeToBytes(String encodedKeyCust) throws IOException {
byte[] key_cust;
try {
key_cust = Base64.getDecoder().decode(encodedKeyCust);
- }
- catch (IllegalArgumentException e) {
- throw new IOException("Failed to decode specified key custodian as Base64 string: "
- + encodedKeyCust, e);
+ } catch (IllegalArgumentException e) {
+ throw new IOException(
+ "Failed to decode specified key custodian as Base64 string: " + encodedKeyCust, e);
}
return key_cust;
}
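+
+  // Illustrative only (not part of this patch): decodeToBytes expects Base64 input, e.g.
+  // decodeToBytes("Y3VzdDE=") yields the bytes of "cust1", while malformed input such as
+  // "not*base64" surfaces as an IOException rather than an IllegalArgumentException.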
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java
index ea64355fc56b..2947addf5f8a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java
@@ -33,8 +33,7 @@ public enum ManagedKeyState {
/** Represents the retrieval failure status of a managed key. */
FAILED((byte) 3),
/** Represents the disabled status of a managed key. */
- DISABLED((byte) 4),
- ;
+ DISABLED((byte) 4),;
private static Map<Byte, ManagedKeyState> lookupByVal;
@@ -60,7 +59,7 @@ public byte getVal() {
public static ManagedKeyState forValue(byte val) {
if (lookupByVal == null) {
Map<Byte, ManagedKeyState> tbl = new HashMap<>();
- for (ManagedKeyState e: ManagedKeyState.values()) {
+ for (ManagedKeyState e : ManagedKeyState.values()) {
tbl.put(e.getVal(), e);
}
lookupByVal = tbl;
@@ -70,7 +69,6 @@ public static ManagedKeyState forValue(byte val) {
/**
* This is used to determine if a key is usable for encryption/decryption.
- *
* @param state The key state to check
* @return true if the key state is ACTIVE or INACTIVE, false otherwise
*/
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
index b9005e1b27e7..74f892f7ad89 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
@@ -22,7 +22,6 @@
import java.security.Key;
import java.util.HashMap;
import java.util.Map;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.GsonUtil;
@@ -34,7 +33,8 @@ public class ManagedKeyStoreKeyProvider extends KeyStoreKeyProvider implements M
public static final String KEY_METADATA_CUST = "KeyCustodian";
private static final java.lang.reflect.Type KEY_METADATA_TYPE =
- new TypeToken<Map<String, String>>(){}.getType();
+ new TypeToken<Map<String, String>>() {
+ }.getType();
private Configuration conf;
@@ -46,8 +46,8 @@ public void initConfig(Configuration conf) {
@Override
public ManagedKeyData getSystemKey(byte[] clusterId) {
checkConfig();
- String systemKeyAlias = conf.get(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY,
- null);
+ String systemKeyAlias =
+ conf.get(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, null);
if (systemKeyAlias == null) {
throw new RuntimeException("No alias configured for system key");
}
@@ -56,29 +56,29 @@ public ManagedKeyData getSystemKey(byte[] clusterId) {
throw new RuntimeException("Unable to find system key with alias: " + systemKeyAlias);
}
// Encode clusterId too for consistency with that of key custodian.
- String keyMetadata = generateKeyMetadata(systemKeyAlias,
- ManagedKeyProvider.encodeToStr(clusterId));
+ String keyMetadata =
+ generateKeyMetadata(systemKeyAlias, ManagedKeyProvider.encodeToStr(clusterId));
return new ManagedKeyData(clusterId, ManagedKeyData.KEY_SPACE_GLOBAL, key,
- ManagedKeyState.ACTIVE, keyMetadata);
+ ManagedKeyState.ACTIVE, keyMetadata);
}
@Override
public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace) throws IOException {
checkConfig();
String encodedCust = ManagedKeyProvider.encodeToStr(key_cust);
- String aliasConfKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + "." +
- "alias";
+ String aliasConfKey =
+ HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + "." + "alias";
String keyMetadata = generateKeyMetadata(conf.get(aliasConfKey, null), encodedCust);
return unwrapKey(keyMetadata, null);
}
@Override
public ManagedKeyData unwrapKey(String keyMetadataStr, byte[] wrappedKey) throws IOException {
- Map<String, String> keyMetadata = GsonUtil.getDefaultInstance().fromJson(keyMetadataStr,
- KEY_METADATA_TYPE);
+ Map<String, String> keyMetadata =
+ GsonUtil.getDefaultInstance().fromJson(keyMetadataStr, KEY_METADATA_TYPE);
String encodedCust = keyMetadata.get(KEY_METADATA_CUST);
- String activeStatusConfKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust +
- ".active";
+ String activeStatusConfKey =
+ HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + ".active";
boolean isActive = conf.getBoolean(activeStatusConfKey, true);
byte[] key_cust = ManagedKeyProvider.decodeToBytes(encodedCust);
String alias = keyMetadata.get(KEY_METADATA_ALIAS);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
index 2e52dccc0598..be4f36d88023 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
@@ -20,24 +20,21 @@
import java.io.IOException;
import java.security.KeyException;
import java.util.List;
-
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.yetus.audience.InterfaceAudience;
/**
- * KeymetaAdmin is an interface for administrative functions related to managed keys.
- * It handles the following methods:
+ * KeymetaAdmin is an interface for administrative functions related to managed keys. It provides
+ * the following methods:
*/
@InterfaceAudience.Public
public interface KeymetaAdmin {
/**
* Enables key management for the specified custodian and namespace.
- *
* @param keyCust The key custodian in base64 encoded format.
* @param keyNamespace The namespace for the key management.
- *
* @return The list of {@link ManagedKeyData} objects each identifying the key and its current
- * status.
+ * status.
* @throws IOException if an error occurs while enabling key management.
*/
List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
@@ -45,11 +42,10 @@ List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
/**
* Get the status of all the keys for the specified custodian.
- *
* @param keyCust The key custodian in base64 encoded format.
* @param keyNamespace The namespace for the key management.
* @return The list of {@link ManagedKeyData} objects each identifying the key and its current
- * status.
+ * status.
* @throws IOException if an error occurs while enabling key management.
*/
List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index 55da4b3b12c0..1b2938b9f9b5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -1688,7 +1688,6 @@ public static byte[] add(final byte[] a, final byte[] b) {
/**
* Concatenate byte arrays.
- *
* @param a first third
* @param b second third
* @param c third third
@@ -1700,7 +1699,6 @@ public static byte[] add(final byte[] a, final byte[] b, final byte[] c) {
/**
* Concatenate byte arrays.
- *
* @param a first fourth
* @param b second fourth
* @param c third fourth
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
index a4a8ce82b2a8..2d44faf9511c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
@@ -19,7 +19,6 @@
import java.io.IOException;
import java.util.concurrent.atomic.LongAdder;
-
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.gson.Gson;
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
index a3397f96df70..99c9c132d7d4 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
@@ -23,7 +23,6 @@
import java.util.HashMap;
import java.util.Map;
import javax.crypto.KeyGenerator;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
@@ -31,14 +30,14 @@
/**
* A simple implementation of ManagedKeyProvider for testing. It generates a key on demand given a
- * prefix. One can control the state of a key by calling setKeyState and can rotate a key by
- * calling setKey.
+ * prefix. One can control the state of a key by calling setKeyState and can rotate a key by calling
+ * setKey.
*/
public class MockManagedKeyProvider extends MockAesKeyProvider implements ManagedKeyProvider {
protected static final Logger LOG = LoggerFactory.getLogger(MockManagedKeyProvider.class);
private boolean multikeyGenMode;
-  private Map<String, Map<String, Key>> keys = new HashMap<>();
+  private Map<String, Map<String, Key>> keys = new HashMap<>();
  private Map<String, Map<String, ManagedKeyData>> lastGenKeyData = new HashMap<>();
  // Keep references of all generated keys by their full and partial metadata.
  private Map<String, Key> allGeneratedKeys = new HashMap<>();
@@ -47,7 +46,7 @@ public class MockManagedKeyProvider extends MockAesKeyProvider implements Manage
@Override
public void initConfig(Configuration conf) {
- // NO-OP
+ // NO-OP
}
@Override
@@ -56,8 +55,7 @@ public ManagedKeyData getSystemKey(byte[] systemId) throws IOException {
}
@Override
- public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace)
- throws IOException {
+ public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace) throws IOException {
String alias = Bytes.toString(key_cust);
return getKey(key_cust, alias, key_namespace);
}
@@ -68,28 +66,26 @@ public ManagedKeyData unwrapKey(String keyMetadata, byte[] wrappedKey) throws IO
if (allGeneratedKeys.containsKey(keyMetadata)) {
ManagedKeyState keyState = this.keyState.get(meta_toks[1]);
ManagedKeyData managedKeyData =
- new ManagedKeyData(meta_toks[0].getBytes(), meta_toks[2],
- allGeneratedKeys.get(keyMetadata),
+ new ManagedKeyData(meta_toks[0].getBytes(), meta_toks[2], allGeneratedKeys.get(keyMetadata),
keyState == null ? ManagedKeyState.ACTIVE : keyState, keyMetadata);
return registerKeyData(meta_toks[1], managedKeyData);
}
- return new ManagedKeyData(meta_toks[0].getBytes(), meta_toks[2],
- null, ManagedKeyState.FAILED, keyMetadata);
+ return new ManagedKeyData(meta_toks[0].getBytes(), meta_toks[2], null, ManagedKeyState.FAILED,
+ keyMetadata);
}
public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace) {
- if (! lastGenKeyData.containsKey(keyNamespace)) {
+ if (!lastGenKeyData.containsKey(keyNamespace)) {
return null;
}
return lastGenKeyData.get(keyNamespace).get(alias);
}
private ManagedKeyData registerKeyData(String alias, ManagedKeyData managedKeyData) {
- if (! lastGenKeyData.containsKey(managedKeyData.getKeyNamespace())) {
+ if (!lastGenKeyData.containsKey(managedKeyData.getKeyNamespace())) {
lastGenKeyData.put(managedKeyData.getKeyNamespace(), new HashMap<>());
}
- lastGenKeyData.get(managedKeyData.getKeyNamespace()).put(alias,
- managedKeyData);
+ lastGenKeyData.get(managedKeyData.getKeyNamespace()).put(alias, managedKeyData);
return managedKeyData;
}
@@ -102,7 +98,7 @@ public void setMockedKeyState(String alias, ManagedKeyState status) {
}
public void setMockedKey(String alias, Key key, String keyNamespace) {
- if (! keys.containsKey(keyNamespace)) {
+ if (!keys.containsKey(keyNamespace)) {
keys.put(keyNamespace, new HashMap<>());
}
    Map<String, Key> keysForSpace = keys.get(keyNamespace);
@@ -139,13 +135,13 @@ public static Key generateSecretKey() {
private ManagedKeyData getKey(byte[] key_cust, String alias, String key_namespace) {
ManagedKeyState keyState = this.keyState.get(alias);
- if (! keys.containsKey(key_namespace)) {
+ if (!keys.containsKey(key_namespace)) {
keys.put(key_namespace, new HashMap<>());
}
    Map<String, Key> keySpace = keys.get(key_namespace);
Key key = null;
if (keyState != ManagedKeyState.FAILED && keyState != ManagedKeyState.DISABLED) {
- if (multikeyGenMode || ! keySpace.containsKey(alias)) {
+ if (multikeyGenMode || !keySpace.containsKey(alias)) {
key = generateSecretKey();
keySpace.put(alias, key);
}
@@ -159,9 +155,8 @@ private ManagedKeyData getKey(byte[] key_cust, String alias, String key_namespac
String keyMetadata = partialMetadata + ":" + key_namespace + ":" + checksum;
allGeneratedKeys.put(partialMetadata, key);
allGeneratedKeys.put(keyMetadata, key);
- ManagedKeyData managedKeyData =
- new ManagedKeyData(key_cust, key_namespace, key,
- keyState == null ? ManagedKeyState.ACTIVE : keyState, keyMetadata);
+ ManagedKeyData managedKeyData = new ManagedKeyData(key_cust, key_namespace, key,
+ keyState == null ? ManagedKeyState.ACTIVE : keyState, keyMetadata);
return registerKeyData(alias, managedKeyData);
}
}
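A short usage sketch (not part of the patch) showing how a test can drive the mock with the methods above; note that the custodian string doubles as the alias:

    import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
    import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
    import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;

    public class MockProviderSketch {
      public static void main(String[] args) throws Exception {
        MockManagedKeyProvider provider = new MockManagedKeyProvider();
        byte[] cust = "tenant1".getBytes();
        // First call generates a key on demand and caches it under alias "tenant1".
        ManagedKeyData active = provider.getManagedKey(cust, ManagedKeyData.KEY_SPACE_GLOBAL);
        // Flip the mocked state; the next fetch reports the key as DISABLED.
        provider.setMockedKeyState("tenant1", ManagedKeyState.DISABLED);
        ManagedKeyData disabled = provider.getManagedKey(cust, ManagedKeyData.KEY_SPACE_GLOBAL);
        System.out.println(active.getKeyState() + " then " + disabled.getKeyState());
      }
    }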
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
index de91aa904581..a0304e6337fb 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
@@ -67,12 +67,9 @@ public class TestKeyStoreKeyProvider {
@Parameterized.Parameters(name = "withPasswordOnAlias={0} withPasswordFile={1}")
  public static Collection<Object[]> parameters() {
- return Arrays.asList(new Object[][] {
- { Boolean.TRUE, Boolean.TRUE },
- { Boolean.TRUE, Boolean.FALSE },
- { Boolean.FALSE, Boolean.TRUE },
- { Boolean.FALSE, Boolean.FALSE },
- });
+ return Arrays
+ .asList(new Object[][] { { Boolean.TRUE, Boolean.TRUE }, { Boolean.TRUE, Boolean.FALSE },
+ { Boolean.FALSE, Boolean.TRUE }, { Boolean.FALSE, Boolean.FALSE }, });
}
@Before
@@ -109,8 +106,7 @@ public void setUp() throws Exception {
if (withPasswordFile) {
provider.init("jceks://" + storeFile.toURI().getPath() + "?passwordFile="
+ URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8"));
- }
- else {
+ } else {
provider.init("jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD);
}
}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java
index 96b58a17b8e0..555bf66b0e0d 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java
@@ -28,9 +28,7 @@
import java.security.Key;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;
-
import javax.crypto.KeyGenerator;
-
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -88,8 +86,8 @@ public void testConstructorNullChecks() {
@Test
public void testConstructorWithFailedStateAndNullMetadata() {
- ManagedKeyData keyData = new ManagedKeyData(keyCust, keyNamespace, null,
- ManagedKeyState.FAILED, null);
+ ManagedKeyData keyData =
+ new ManagedKeyData(keyCust, keyNamespace, null, ManagedKeyState.FAILED, null);
assertNotNull(keyData);
assertEquals(ManagedKeyState.FAILED, keyData.getKeyState());
assertNull(keyData.getKeyMetadata());
@@ -99,8 +97,8 @@ public void testConstructorWithFailedStateAndNullMetadata() {
@Test
public void testConstructorWithRefreshTimestamp() {
long refreshTimestamp = System.currentTimeMillis();
- ManagedKeyData keyDataWithTimestamp = new ManagedKeyData(keyCust, keyNamespace, theKey,
- keyState, keyMetadata, refreshTimestamp);
+ ManagedKeyData keyDataWithTimestamp =
+ new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata, refreshTimestamp);
assertEquals(refreshTimestamp, keyDataWithTimestamp.getRefreshTimestamp());
}
@@ -156,10 +154,15 @@ public void testGetKeyMetadataHashEncoded() {
@Test
public void testGetKeyMetadataHashEncodedWithNullHash() {
// Create ManagedKeyData with FAILED state and null metadata
- ManagedKeyData keyData = new ManagedKeyData(
- "custodian".getBytes(), "namespace", null, ManagedKeyState.FAILED,
- null // null metadata should result in null hash
- );
+    // A null metadata argument should result in a null hash.
+    ManagedKeyData keyData =
+      new ManagedKeyData("custodian".getBytes(), "namespace", null, ManagedKeyState.FAILED, null);
String encoded = keyData.getKeyMetadataHashEncoded();
assertNull(encoded);
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
index 876e14fa1101..472ce56405a9 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
@@ -34,7 +34,6 @@
import java.util.Properties;
import java.util.UUID;
import javax.crypto.spec.SecretKeySpec;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -43,7 +42,6 @@
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.GsonUtil;
-
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
@@ -99,19 +97,18 @@ protected void addCustomEntries(KeyStore store, Properties passwdProps) throws E
withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes());
- String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "."
- + "alias";
+ String confKey =
+ HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "." + "alias";
conf.set(confKey, alias);
passwdProps.setProperty(alias, PASSWORD);
clusterId = UUID.randomUUID().toString();
- systemKey = MessageDigest.getInstance("SHA-256").digest(
- Bytes.toBytes(SYSTEM_KEY_ALIAS));
- store.setEntry(SYSTEM_KEY_ALIAS, new KeyStore.SecretKeyEntry(
- new SecretKeySpec(systemKey, "AES")),
- new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() :
- new char[0]));
+ systemKey = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(SYSTEM_KEY_ALIAS));
+ store.setEntry(SYSTEM_KEY_ALIAS,
+ new KeyStore.SecretKeyEntry(new SecretKeySpec(systemKey, "AES")),
+ new KeyStore.PasswordProtection(
+ withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
@@ -121,24 +118,24 @@ protected void addCustomEntries(KeyStore store, Properties passwdProps) throws E
private void addEntry(String alias, String prefix) {
String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes());
- String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "."
- + "alias";
+ String confKey =
+ HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "." + "alias";
conf.set(confKey, alias);
}
@Test
public void testMissingConfig() throws Exception {
managedKeyProvider.initConfig(null);
- RuntimeException ex = assertThrows(RuntimeException.class,
- () -> managedKeyProvider.getSystemKey(null));
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> managedKeyProvider.getSystemKey(null));
assertEquals("initConfig is not called or config is null", ex.getMessage());
}
@Test
public void testGetManagedKey() throws Exception {
for (Bytes prefix : prefix2key.keySet()) {
- ManagedKeyData keyData = managedKeyProvider.getManagedKey(prefix.get(),
- ManagedKeyData.KEY_SPACE_GLOBAL);
+ ManagedKeyData keyData =
+ managedKeyProvider.getManagedKey(prefix.get(), ManagedKeyData.KEY_SPACE_GLOBAL);
assertKeyData(keyData, ManagedKeyState.ACTIVE, prefix2key.get(prefix).get(), prefix.get(),
prefix2alias.get(prefix));
}
@@ -150,8 +147,8 @@ public void testGetInactiveKey() throws Exception {
String encPrefix = Base64.getEncoder().encodeToString(firstPrefix.get());
conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + ".active",
"false");
- ManagedKeyData keyData = managedKeyProvider.getManagedKey(firstPrefix.get(),
- ManagedKeyData.KEY_SPACE_GLOBAL);
+ ManagedKeyData keyData =
+ managedKeyProvider.getManagedKey(firstPrefix.get(), ManagedKeyData.KEY_SPACE_GLOBAL);
assertNotNull(keyData);
assertKeyData(keyData, ManagedKeyState.INACTIVE, prefix2key.get(firstPrefix).get(),
firstPrefix.get(), prefix2alias.get(firstPrefix));
@@ -160,8 +157,8 @@ public void testGetInactiveKey() throws Exception {
@Test
public void testGetInvalidKey() throws Exception {
byte[] invalidPrefixBytes = "invalid".getBytes();
- ManagedKeyData keyData = managedKeyProvider.getManagedKey(invalidPrefixBytes,
- ManagedKeyData.KEY_SPACE_GLOBAL);
+ ManagedKeyData keyData =
+ managedKeyProvider.getManagedKey(invalidPrefixBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
assertNotNull(keyData);
assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefixBytes, null);
}
@@ -172,11 +169,10 @@ public void testGetDisabledKey() throws Exception {
String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active",
"false");
- ManagedKeyData keyData = managedKeyProvider.getManagedKey(invalidPrefix,
- ManagedKeyData.KEY_SPACE_GLOBAL);
+ ManagedKeyData keyData =
+ managedKeyProvider.getManagedKey(invalidPrefix, ManagedKeyData.KEY_SPACE_GLOBAL);
assertNotNull(keyData);
- assertKeyData(keyData, ManagedKeyState.DISABLED, null,
- invalidPrefix, null);
+ assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidPrefix, null);
}
@Test
@@ -185,12 +181,11 @@ public void testGetSystemKey() throws Exception {
assertKeyData(clusterKeyData, ManagedKeyState.ACTIVE, systemKey, clusterId.getBytes(),
SYSTEM_KEY_ALIAS);
conf.unset(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY);
- RuntimeException ex = assertThrows(RuntimeException.class,
- () -> managedKeyProvider.getSystemKey(null));
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> managedKeyProvider.getSystemKey(null));
assertEquals("No alias configured for system key", ex.getMessage());
conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, "non_existing_alias");
- ex = assertThrows(RuntimeException.class,
- () -> managedKeyProvider.getSystemKey(null));
+ ex = assertThrows(RuntimeException.class, () -> managedKeyProvider.getSystemKey(null));
assertTrue(ex.getMessage().startsWith("Unable to find system key with alias:"));
}
@@ -199,12 +194,11 @@ public void testUnwrapInvalidKey() throws Exception {
String invalidAlias = "invalidAlias";
byte[] invalidPrefix = new byte[] { 1, 2, 3 };
String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
- String invalidMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias,
- invalidPrefixEnc);
+ String invalidMetadata =
+ ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidPrefixEnc);
ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null);
assertNotNull(keyData);
- assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefix,
- invalidAlias);
+ assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefix, invalidAlias);
}
@Test
@@ -214,8 +208,8 @@ public void testUnwrapDisabledKey() throws Exception {
String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active",
"false");
- String invalidMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias,
- invalidPrefixEnc);
+ String invalidMetadata =
+ ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidPrefixEnc);
ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null);
assertNotNull(keyData);
assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidPrefix, invalidAlias);
@@ -227,14 +221,13 @@ private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState,
assertEquals(expKeyState, keyData.getKeyState());
if (key == null) {
assertNull(keyData.getTheKey());
- }
- else {
+ } else {
byte[] keyBytes = keyData.getTheKey().getEncoded();
assertEquals(key.length, keyBytes.length);
assertEquals(new Bytes(key), keyBytes);
}
-    Map<String, String> keyMetadata = GsonUtil.getDefaultInstance().fromJson(keyData.getKeyMetadata(),
-      HashMap.class);
+    Map<String, String> keyMetadata =
+      GsonUtil.getDefaultInstance().fromJson(keyData.getKeyMetadata(), HashMap.class);
assertNotNull(keyMetadata);
assertEquals(new Bytes(prefixBytes), keyData.getKeyCustodian());
assertEquals(alias, keyMetadata.get(KEY_METADATA_ALIAS));
@@ -251,7 +244,8 @@ public static class TestManagedKeyProviderDefault {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestManagedKeyProviderDefault.class);
- @Test public void testEncodeToStr() {
+ @Test
+ public void testEncodeToStr() {
byte[] input = { 72, 101, 108, 108, 111 }; // "Hello" in ASCII
String expected = "SGVsbG8=";
String actual = ManagedKeyProvider.encodeToStr(input);
@@ -259,7 +253,8 @@ public static class TestManagedKeyProviderDefault {
assertEquals("Encoded string should match expected Base64 representation", expected, actual);
}
- @Test public void testDecodeToBytes() throws Exception {
+ @Test
+ public void testDecodeToBytes() throws Exception {
String input = "SGVsbG8="; // "Hello" in Base64
byte[] expected = { 72, 101, 108, 108, 111 };
byte[] actual = ManagedKeyProvider.decodeToBytes(input);
@@ -268,7 +263,8 @@ public static class TestManagedKeyProviderDefault {
Arrays.equals(expected, actual));
}
- @Test public void testEncodeToStrAndDecodeToBytes() throws Exception {
+ @Test
+ public void testEncodeToStrAndDecodeToBytes() throws Exception {
byte[] originalBytes = { 1, 2, 3, 4, 5 };
String encoded = ManagedKeyProvider.encodeToStr(originalBytes);
byte[] decoded = ManagedKeyProvider.decodeToBytes(encoded);
@@ -277,13 +273,14 @@ public static class TestManagedKeyProviderDefault {
Arrays.equals(originalBytes, decoded));
}
- @Test(expected = Exception.class) public void testDecodeToBytes_InvalidInput()
- throws Exception {
+ @Test(expected = Exception.class)
+ public void testDecodeToBytes_InvalidInput() throws Exception {
String invalidInput = "This is not a valid Base64 string!";
ManagedKeyProvider.decodeToBytes(invalidInput);
}
- @Test public void testRoundTrip_LargeInput() throws Exception {
+ @Test
+ public void testRoundTrip_LargeInput() throws Exception {
byte[] largeInput = new byte[1000];
for (int i = 0; i < largeInput.length; i++) {
largeInput[i] = (byte) (i % 256);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 0996fbf21c52..39d09ab170f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -259,15 +259,18 @@ public ChoreService getChoreService() {
return null;
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index c0ddad9109ad..c1a6d7dc9ec8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -86,19 +86,13 @@ default AsyncConnection getAsyncConnection() {
/** Returns The {@link ChoreService} instance for this server */
ChoreService getChoreService();
- /**
- * @return the cache for cluster keys.
- */
+ /** Returns the cache for cluster keys. */
public SystemKeyCache getSystemKeyCache();
- /**
- * @return the cache for managed keys.
- */
+ /** Returns the cache for managed keys. */
public ManagedKeyDataCache getManagedKeyDataCache();
- /**
- * @return the admin for keymeta.
- */
+ /** Returns the admin for keymeta. */
public KeymetaAdmin getKeymetaAdmin();
/** Returns Return the FileSystem object used (can return null!). */
@@ -122,4 +116,4 @@ default FileSystem getFileSystem() {
default boolean isStopping() {
return false;
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
index 31c770785604..1e4ee2a3e796 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
@@ -19,7 +19,6 @@
import java.io.IOException;
import java.security.KeyException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
@@ -27,7 +26,6 @@
import org.apache.hadoop.hbase.io.crypto.KeyProvider;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
-import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -49,7 +47,6 @@ public abstract class KeyManagementBase {
/**
* Construct with a server instance. Configuration is derived from the server.
- *
* @param server the server instance
*/
public KeyManagementBase(Server server) {
@@ -59,7 +56,6 @@ public KeyManagementBase(Server server) {
/**
* Construct with a custom configuration and no server.
- *
* @param configuration the configuration instance
*/
public KeyManagementBase(Configuration configuration) {
@@ -81,13 +77,13 @@ protected Configuration getConfiguration() {
* A utility method for getting the managed key provider.
* @return the key provider
* @throws RuntimeException if no provider is configured or if the configured provider is not an
- * instance of ManagedKeyProvider
+ * instance of ManagedKeyProvider
*/
protected ManagedKeyProvider getKeyProvider() {
KeyProvider provider = Encryption.getKeyProvider(getConfiguration());
if (!(provider instanceof ManagedKeyProvider)) {
throw new RuntimeException("KeyProvider: " + provider.getClass().getName()
- + " expected to be of type ManagedKeyProvider");
+ + " expected to be of type ManagedKeyProvider");
}
return (ManagedKeyProvider) provider;
}
@@ -123,38 +119,38 @@ protected boolean isKeyManagementEnabled() {
}
/**
- * Utility function to retrieves a managed key from the key provider. If an existing key is
+ * Utility function to retrieve a managed key from the key provider. If an existing key is
* provided and the retrieved key is the same as the existing key, it will be ignored.
- *
- * @param encKeyCust the encoded key custodian
- * @param key_cust the key custodian
- * @param keyNamespace the key namespace
- * @param accessor the accessor to use to persist the key. If null, the key will not be persisted.
+ * @param encKeyCust the encoded key custodian
+ * @param key_cust the key custodian
+ * @param keyNamespace the key namespace
+ * @param accessor the accessor to use to persist the key. If null, the key will not be
+ * persisted.
* @param existingActiveKey the existing key, typically the active key already retrieved from the
- * key provider, can be null.
+ * key provider, can be null.
* @return the retrieved key, or null if no key could be retrieved
- * @throws IOException if an error occurs
+ * @throws IOException if an error occurs
* @throws KeyException if an error occurs
*/
protected ManagedKeyData retrieveActiveKey(String encKeyCust, byte[] key_cust,
- String keyNamespace, KeymetaTableAccessor accessor, ManagedKeyData existingActiveKey)
- throws IOException, KeyException {
+ String keyNamespace, KeymetaTableAccessor accessor, ManagedKeyData existingActiveKey)
+ throws IOException, KeyException {
ManagedKeyProvider provider = getKeyProvider();
ManagedKeyData pbeKey = provider.getManagedKey(key_cust, keyNamespace);
if (pbeKey == null) {
throw new IOException("Invalid null managed key received from key provider");
}
- /* Will be useful when refresh API is implemented.
- if (existingActiveKey != null && existingActiveKey.equals(pbeKey)) {
- LOG.info("retrieveManagedKey: no change in key for (custodian: {}, namespace: {}",
- encKeyCust, keyNamespace);
- return null;
- }
- // TODO: If existingActiveKey is not null, we should update the key state to INACTIVE.
+    /*
+     * Will be useful when refresh API is implemented.
+     * if (existingActiveKey != null && existingActiveKey.equals(pbeKey)) {
+     *   LOG.info("retrieveManagedKey: no change in key for (custodian: {}, namespace: {}",
+     *     encKeyCust, keyNamespace);
+     *   return null;
+     * }
+     * // TODO: If existingActiveKey is not null, we should update the key state to INACTIVE.
*/
- LOG.info("retrieveManagedKey: got managed key with status: {} and metadata: {} for "
- + "(custodian: {}, namespace: {})", pbeKey.getKeyState(), pbeKey.getKeyMetadata(),
- encKeyCust, keyNamespace);
+ LOG.info(
+ "retrieveManagedKey: got managed key with status: {} and metadata: {} for "
+ + "(custodian: {}, namespace: {})",
+ pbeKey.getKeyState(), pbeKey.getKeyMetadata(), encKeyCust, keyNamespace);
if (accessor != null) {
accessor.addKey(pbeKey);
}
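A subclass sketch (not part of the patch) of how retrieveActiveKey is meant to be driven, assuming KeyManagementBase declares no other abstract members; the class and method names here are hypothetical:

    package org.apache.hadoop.hbase.keymeta;

    import java.io.IOException;
    import java.security.KeyException;
    import org.apache.hadoop.hbase.Server;
    import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
    import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;

    public class ExampleKeyFetcher extends KeyManagementBase {
      public ExampleKeyFetcher(Server server) {
        super(server);
      }

      ManagedKeyData fetchAndPersist(byte[] custodian, String namespace,
        KeymetaTableAccessor accessor) throws IOException, KeyException {
        String encoded = ManagedKeyProvider.encodeToStr(custodian);
        // A non-null accessor persists the freshly retrieved key; null skips
        // persistence. No previously retrieved active key is supplied here.
        return retrieveActiveKey(encoded, custodian, namespace, accessor, null);
      }
    }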
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java
index 02fb31b770e6..4c16d2b59aa7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java
@@ -21,7 +21,6 @@
import java.security.KeyException;
import java.util.Collections;
import java.util.List;
-
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
@@ -48,9 +47,10 @@ public List enableKeyManagement(String keyCust, String keyNamesp
// Check if (cust, namespace) pair is already enabled and has an active key.
ManagedKeyData activeKey = getActiveKey(key_cust, keyNamespace);
if (activeKey != null) {
- LOG.info("enableManagedKeys: specified (custodian: {}, namespace: {}) already has "
- + "an active managed key with metadata: {}", keyCust, keyNamespace,
- activeKey.getKeyMetadata());
+ LOG.info(
+ "enableManagedKeys: specified (custodian: {}, namespace: {}) already has "
+ + "an active managed key with metadata: {}",
+ keyCust, keyNamespace, activeKey.getKeyMetadata());
return Collections.singletonList(activeKey);
}
@@ -63,8 +63,7 @@ public List enableKeyManagement(String keyCust, String keyNamesp
  public List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
throws IOException, KeyException {
assertKeyManagementEnabled();
- LOG.info("Getting key statuses for custodian: {} under namespace: {}", keyCust,
- keyNamespace);
+ LOG.info("Getting key statuses for custodian: {} under namespace: {}", keyCust, keyNamespace);
byte[] key_cust = ManagedKeyProvider.decodeToBytes(keyCust);
return getAllKeys(key_cust, keyNamespace);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java
index 68f78cd12dd3..c33a331ba04a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.keymeta;
import java.io.IOException;
-
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
@@ -33,14 +32,12 @@ public class KeymetaMasterService extends KeyManagementBase {
private final MasterServices master;
- private static final TableDescriptorBuilder TABLE_DESCRIPTOR_BUILDER = TableDescriptorBuilder
- .newBuilder(KeymetaTableAccessor.KEY_META_TABLE_NAME).setRegionReplication(1)
- .setPriority(HConstants.SYSTEMTABLE_QOS)
- .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(
- KeymetaTableAccessor.KEY_META_INFO_FAMILY)
- .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setMaxVersions(1)
- .setInMemory(true)
- .build());
+ private static final TableDescriptorBuilder TABLE_DESCRIPTOR_BUILDER =
+ TableDescriptorBuilder.newBuilder(KeymetaTableAccessor.KEY_META_TABLE_NAME)
+ .setRegionReplication(1).setPriority(HConstants.SYSTEMTABLE_QOS)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder
+ .newBuilder(KeymetaTableAccessor.KEY_META_INFO_FAMILY)
+ .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setMaxVersions(1).setInMemory(true).build());
public KeymetaMasterService(MasterServices masterServices) {
super(masterServices);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
index fde42b8dd295..07b73376fa5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
@@ -22,7 +22,6 @@
import java.util.Base64;
import java.util.Collections;
import java.util.List;
-
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
@@ -46,13 +45,13 @@
/**
* This class implements a coprocessor service endpoint for the key management metadata operations.
- * It handles the following methods:
- *
- * This endpoint is designed to work in conjunction with the {@link KeymetaAdmin}
- * interface, which provides the actual implementation of the key metadata operations.
+ * This endpoint is designed to work in conjunction with the {@link KeymetaAdmin} interface, which
+ * provides the actual implementation of the key metadata operations.
*
*/
-@CoreCoprocessor @InterfaceAudience.Private
+@CoreCoprocessor
+@InterfaceAudience.Private
public class KeymetaServiceEndpoint implements MasterCoprocessor {
private static final Logger LOG = LoggerFactory.getLogger(KeymetaServiceEndpoint.class);
@@ -63,7 +62,6 @@ public class KeymetaServiceEndpoint implements MasterCoprocessor {
/**
* Starts the coprocessor by initializing the reference to the
* {@link org.apache.hadoop.hbase.master.MasterServices} * instance.
- *
* @param env The coprocessor environment.
* @throws IOException If an error occurs during initialization.
*/
@@ -80,7 +78,6 @@ public void start(CoprocessorEnvironment env) throws IOException {
* Returns an iterable of the available coprocessor services, which includes the
* {@link ManagedKeysService} implemented by
* {@link KeymetaServiceEndpoint.KeymetaAdminServiceImpl}.
- *
* @return An iterable of the available coprocessor services.
*/
@Override
@@ -89,8 +86,8 @@ public Iterable getServices() {
}
/**
- * The implementation of the {@link ManagedKeysProtos.ManagedKeysService}
- * interface, which provides the actual method implementations for enabling key management.
+ * The implementation of the {@link ManagedKeysProtos.ManagedKeysService} interface, which
+ * provides the actual method implementations for enabling key management.
*/
@InterfaceAudience.Private
public class KeymetaAdminServiceImpl extends ManagedKeysService {
@@ -98,16 +95,15 @@ public class KeymetaAdminServiceImpl extends ManagedKeysService {
/**
* Enables key management for a given tenant and namespace, as specified in the provided
* request.
- *
* @param controller The RPC controller.
* @param request The request containing the tenant and table specifications.
* @param done The callback to be invoked with the response.
*/
@Override
public void enableKeyManagement(RpcController controller, ManagedKeysRequest request,
-        RpcCallback<GetManagedKeysResponse> done) {
+      RpcCallback<GetManagedKeysResponse> done) {
ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request);
- if (builder.getKeyCust() != null && ! builder.getKeyCust().isEmpty()) {
+ if (builder.getKeyCust() != null && !builder.getKeyCust().isEmpty()) {
try {
          List<ManagedKeyData> managedKeyStates = master.getKeymetaAdmin()
.enableKeyManagement(request.getKeyCust(), request.getKeyNamespace());
@@ -122,9 +118,9 @@ public void enableKeyManagement(RpcController controller, ManagedKeysRequest req
@Override
public void getManagedKeys(RpcController controller, ManagedKeysRequest request,
-        RpcCallback<GetManagedKeysResponse> done) {
+      RpcCallback<GetManagedKeysResponse> done) {
ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request);
- if (builder.getKeyCust() != null && ! builder.getKeyCust().isEmpty()) {
+ if (builder.getKeyCust() != null && !builder.getKeyCust().isEmpty()) {
try {
          List<ManagedKeyData> managedKeyStates = master.getKeymetaAdmin()
.getManagedKeys(request.getKeyCust(), request.getKeyNamespace());
@@ -141,8 +137,8 @@ public void getManagedKeys(RpcController controller, ManagedKeysRequest request,
@InterfaceAudience.Private
public static ManagedKeysResponse.Builder getResponseBuilder(RpcController controller,
ManagedKeysRequest request) {
- ManagedKeysResponse.Builder builder = ManagedKeysResponse.newBuilder()
- .setKeyNamespace(request.getKeyNamespace());
+ ManagedKeysResponse.Builder builder =
+ ManagedKeysResponse.newBuilder().setKeyNamespace(request.getKeyNamespace());
byte[] key_cust = convertToKeyCustBytes(controller, request, builder);
if (key_cust != null) {
builder.setKeyCustBytes(ByteString.copyFrom(key_cust));
@@ -155,12 +151,10 @@ public static ManagedKeysResponse.Builder getResponseBuilder(RpcController contr
public static GetManagedKeysResponse generateKeyStateResponse(
    List<ManagedKeyData> managedKeyStates, ManagedKeysResponse.Builder builder) {
GetManagedKeysResponse.Builder responseBuilder = GetManagedKeysResponse.newBuilder();
- for (ManagedKeyData keyData: managedKeyStates) {
- builder.setKeyState(ManagedKeysProtos.ManagedKeyState.valueOf(
- keyData.getKeyState().getVal()))
+ for (ManagedKeyData keyData : managedKeyStates) {
+ builder.setKeyState(ManagedKeysProtos.ManagedKeyState.valueOf(keyData.getKeyState().getVal()))
.setKeyMetadata(keyData.getKeyMetadata())
- .setRefreshTimestamp(keyData.getRefreshTimestamp())
- ;
+ .setRefreshTimestamp(keyData.getRefreshTimestamp());
responseBuilder.addState(builder.build());
}
return responseBuilder.build();
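For completeness, a client-side sketch (not part of the patch) of invoking this endpoint through the master coprocessor channel; the generated protobuf package, the blocking-stub entry point, and the response type are assumptions based on the service definition (the KeymetaAdminClient added by this change plays this role in practice):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ManagedKeysProtos;

    public class EndpointClientSketch {
      static void printKeyStates(Configuration conf, String encodedCust, String namespace)
        throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(conf);
          Admin admin = conn.getAdmin()) {
          ManagedKeysProtos.ManagedKeysService.BlockingInterface stub =
            ManagedKeysProtos.ManagedKeysService.newBlockingStub(admin.coprocessorService());
          ManagedKeysProtos.ManagedKeysRequest request = ManagedKeysProtos.ManagedKeysRequest
            .newBuilder().setKeyCust(encodedCust).setKeyNamespace(namespace).build();
          System.out.println(stub.getManagedKeys(null, request));
        }
      }
    }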
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
index 08d92a4e1a20..6862e35ddf10 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
@@ -21,11 +21,9 @@
import java.security.Key;
import java.security.KeyException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
-
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -66,14 +64,14 @@ public class KeymetaTableAccessor extends KeyManagementBase {
public static final String DEK_WRAPPED_BY_STK_QUAL_NAME = "w";
public static final byte[] DEK_WRAPPED_BY_STK_QUAL_BYTES =
- Bytes.toBytes(DEK_WRAPPED_BY_STK_QUAL_NAME);
+ Bytes.toBytes(DEK_WRAPPED_BY_STK_QUAL_NAME);
public static final String STK_CHECKSUM_QUAL_NAME = "s";
public static final byte[] STK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(STK_CHECKSUM_QUAL_NAME);
public static final String REFRESHED_TIMESTAMP_QUAL_NAME = "t";
public static final byte[] REFRESHED_TIMESTAMP_QUAL_BYTES =
- Bytes.toBytes(REFRESHED_TIMESTAMP_QUAL_NAME);
+ Bytes.toBytes(REFRESHED_TIMESTAMP_QUAL_NAME);
public static final String KEY_STATE_QUAL_NAME = "k";
public static final byte[] KEY_STATE_QUAL_BYTES = Bytes.toBytes(KEY_STATE_QUAL_NAME);
@@ -91,11 +89,10 @@ public void addKey(ManagedKeyData keyData) throws IOException {
assertKeyManagementEnabled();
    List<Put> puts = new ArrayList<>(2);
if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
- puts.add(addMutationColumns(new Put(constructRowKeyForCustNamespace(keyData)),
- keyData));
+ puts.add(addMutationColumns(new Put(constructRowKeyForCustNamespace(keyData)), keyData));
}
- final Put putForMetadata = addMutationColumns(new Put(constructRowKeyForMetadata(keyData)),
- keyData);
+ final Put putForMetadata =
+ addMutationColumns(new Put(constructRowKeyForMetadata(keyData)), keyData);
puts.add(putForMetadata);
Connection connection = getServer().getConnection();
try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
@@ -105,11 +102,10 @@ public void addKey(ManagedKeyData keyData) throws IOException {
/**
* Get all the keys for the specified key_cust and key_namespace.
- *
* @param key_cust The key custodian.
* @param keyNamespace The namespace
* @return a list of key data, one for each key, can be empty when none were found.
- * @throws IOException when there is an underlying IOException.
+ * @throws IOException when there is an underlying IOException.
* @throws KeyException when there is an underlying KeyException.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
@@ -117,8 +113,8 @@ public List getAllKeys(byte[] key_cust, String keyNamespace)
throws IOException, KeyException {
assertKeyManagementEnabled();
Connection connection = getServer().getConnection();
- byte[] prefixForScan = Bytes.add(Bytes.toBytes(key_cust.length), key_cust,
- Bytes.toBytes(keyNamespace));
+ byte[] prefixForScan =
+ Bytes.add(Bytes.toBytes(key_cust.length), key_cust, Bytes.toBytes(keyNamespace));
PrefixFilter prefixFilter = new PrefixFilter(prefixForScan);
Scan scan = new Scan();
scan.setFilter(prefixFilter);
@@ -139,11 +135,10 @@ public List getAllKeys(byte[] key_cust, String keyNamespace)
/**
* Get the active key for the specified key_cust and key_namespace.
- *
- * @param key_cust The prefix
+ * @param key_cust The prefix
* @param keyNamespace The namespace
* @return the active key data, or null if no active key found
- * @throws IOException when there is an underlying IOException.
+ * @throws IOException when there is an underlying IOException.
* @throws KeyException when there is an underlying KeyException.
*/
public ManagedKeyData getActiveKey(byte[] key_cust, String keyNamespace)
@@ -162,12 +157,11 @@ public ManagedKeyData getActiveKey(byte[] key_cust, String keyNamespace)
/**
* Get the specific key identified by key_cust, keyNamespace and keyState.
- *
- * @param key_cust The prefix.
+ * @param key_cust The prefix.
* @param keyNamespace The namespace.
- * @param keyState The state of the key.
+ * @param keyState The state of the key.
* @return the key or {@code null}
- * @throws IOException when there is an underlying IOException.
+ * @throws IOException when there is an underlying IOException.
* @throws KeyException when there is an underlying KeyException.
*/
public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, ManagedKeyState keyState)
@@ -177,32 +171,30 @@ public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, ManagedKeySta
/**
* Get the specific key identified by key_cust, keyNamespace and keyMetadata.
- *
- * @param key_cust The prefix.
+ * @param key_cust The prefix.
* @param keyNamespace The namespace.
* @param keyMetadata The metadata.
* @return the key or {@code null}
- * @throws IOException when there is an underlying IOException.
+ * @throws IOException when there is an underlying IOException.
* @throws KeyException when there is an underlying KeyException.
*/
public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, String keyMetadata)
throws IOException, KeyException {
return getKeyInternal(key_cust, keyNamespace,
- ManagedKeyData.constructMetadataHash(keyMetadata));
+ ManagedKeyData.constructMetadataHash(keyMetadata));
}
/**
* Internal helper method to get a key using the provided metadata hash.
- *
* @param key_cust The prefix.
* @param keyNamespace The namespace.
* @param keyMetadataHash The metadata hash or state value.
* @return the key or {@code null}
- * @throws IOException when there is an underlying IOException.
+ * @throws IOException when there is an underlying IOException.
* @throws KeyException when there is an underlying KeyException.
*/
private ManagedKeyData getKeyInternal(byte[] key_cust, String keyNamespace,
- byte[] keyMetadataHash) throws IOException, KeyException {
+ byte[] keyMetadataHash) throws IOException, KeyException {
assertKeyManagementEnabled();
Connection connection = getServer().getConnection();
try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
@@ -220,20 +212,18 @@ private Put addMutationColumns(Put put, ManagedKeyData keyData) throws IOExcepti
if (keyData.getTheKey() != null) {
byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getServer().getConfiguration(), null,
keyData.getTheKey(), latestSystemKey.getTheKey());
- put.addColumn(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES,
+ put
+ .addColumn(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES,
Bytes.toBytes(keyData.getKeyChecksum()))
- .addColumn(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES, dekWrappedBySTK)
- .addColumn(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES,
- Bytes.toBytes(latestSystemKey.getKeyChecksum()))
- ;
+ .addColumn(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES, dekWrappedBySTK)
+ .addColumn(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES,
+ Bytes.toBytes(latestSystemKey.getKeyChecksum()));
}
- Put result = put.setDurability(Durability.SKIP_WAL)
- .setPriority(HConstants.SYSTEMTABLE_QOS)
+ Put result = put.setDurability(Durability.SKIP_WAL).setPriority(HConstants.SYSTEMTABLE_QOS)
.addColumn(KEY_META_INFO_FAMILY, REFRESHED_TIMESTAMP_QUAL_BYTES,
Bytes.toBytes(keyData.getRefreshTimestamp()))
.addColumn(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES,
- new byte[] { keyData.getKeyState().getVal() })
- ;
+ new byte[] { keyData.getKeyState().getVal() });
// Only add metadata column if metadata is not null
String metadata = keyData.getKeyMetadata();
@@ -259,7 +249,7 @@ public static byte[] constructRowKeyForMetadata(ManagedKeyData keyData) {
@InterfaceAudience.Private
public static byte[] constructRowKeyForMetadata(byte[] key_cust, String keyNamespace,
- byte[] keyMetadataHash) {
+ byte[] keyMetadataHash) {
return Bytes.add(constructRowKeyForCustNamespace(key_cust, keyNamespace), keyMetadataHash);
}
@@ -276,17 +266,19 @@ public static byte[] constructRowKeyForCustNamespace(byte[] key_cust, String key
@InterfaceAudience.Private
public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, String keyNamespace,
- Result result) throws IOException, KeyException {
+ Result result) throws IOException, KeyException {
if (result == null || result.isEmpty()) {
return null;
}
- ManagedKeyState keyState = ManagedKeyState.forValue(
- result.getValue(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES)[0]);
- String dekMetadata = Bytes.toString(result.getValue(KEY_META_INFO_FAMILY,
- DEK_METADATA_QUAL_BYTES));
+ ManagedKeyState keyState =
+ ManagedKeyState.forValue(result.getValue(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES)[0]);
+ String dekMetadata =
+ Bytes.toString(result.getValue(KEY_META_INFO_FAMILY, DEK_METADATA_QUAL_BYTES));
byte[] dekWrappedByStk = result.getValue(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES);
- if ((keyState == ManagedKeyState.ACTIVE || keyState == ManagedKeyState.INACTIVE)
- && dekWrappedByStk == null) {
+ if (
+ (keyState == ManagedKeyState.ACTIVE || keyState == ManagedKeyState.INACTIVE)
+ && dekWrappedByStk == null
+ ) {
throw new IOException(keyState + " key must have a wrapped key");
}
Key dek = null;
@@ -302,14 +294,13 @@ public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, Str
dek = EncryptionUtil.unwrapKey(server.getConfiguration(), null, dekWrappedByStk,
clusterKey.getTheKey());
}
- long refreshedTimestamp = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY,
- REFRESHED_TIMESTAMP_QUAL_BYTES));
- ManagedKeyData
- dekKeyData = new ManagedKeyData(key_cust, keyNamespace, dek, keyState, dekMetadata,
- refreshedTimestamp);
+ long refreshedTimestamp =
+ Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, REFRESHED_TIMESTAMP_QUAL_BYTES));
+ ManagedKeyData dekKeyData =
+ new ManagedKeyData(key_cust, keyNamespace, dek, keyState, dekMetadata, refreshedTimestamp);
if (dek != null) {
- long dekChecksum = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY,
- DEK_CHECKSUM_QUAL_BYTES));
+ long dekChecksum =
+ Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES));
if (dekKeyData.getKeyChecksum() != dekChecksum) {
LOG.error("Dropping key, current key checksum: {} didn't match the expected checksum: {}"
+ " for key with metadata: {}", dekKeyData.getKeyChecksum(), dekChecksum, dekMetadata);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
index 87c2195543c2..0b51f8c54a09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
@@ -17,10 +17,11 @@
*/
package org.apache.hadoop.hbase.keymeta;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
import java.io.IOException;
import java.security.KeyException;
import java.util.Objects;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
@@ -32,11 +33,8 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.github.benmanes.caffeine.cache.Cache;
-import com.github.benmanes.caffeine.cache.Caffeine;
-
/**
- * In-memory cache for ManagedKeyData entries, using key metadata as the cache key. Uses two
+ * In-memory cache for ManagedKeyData entries, using key metadata as the cache key. Uses two
* independent Caffeine caches: one for general key data and one for active keys only with
* hierarchical structure for efficient single key retrieval.
*/
@@ -49,8 +47,8 @@ public class ManagedKeyDataCache extends KeyManagementBase {
private final KeymetaTableAccessor keymetaAccessor;
/**
- * Composite key for active keys cache containing custodian and namespace.
- * NOTE: Pair won't work out of the box because it won't work with byte[] as is.
+ * Composite key for active keys cache containing custodian and namespace. NOTE: Pair won't work
+ * out of the box because it won't work with byte[] as is.
*/
@InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.UNITTEST })
public static class ActiveKeysCacheKey {
@@ -64,13 +62,11 @@ public ActiveKeysCacheKey(byte[] custodian, String namespace) {
@Override
public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null || getClass() != obj.getClass())
- return false;
+ if (this == obj) return true;
+ if (obj == null || getClass() != obj.getClass()) return false;
ActiveKeysCacheKey cacheKey = (ActiveKeysCacheKey) obj;
- return Bytes.equals(custodian, cacheKey.custodian) &&
- Objects.equals(namespace, cacheKey.namespace);
+ return Bytes.equals(custodian, cacheKey.custodian)
+ && Objects.equals(namespace, cacheKey.namespace);
}
@Override
@@ -82,8 +78,7 @@ public int hashCode() {
/**
* Constructs the ManagedKeyDataCache with the given configuration and keymeta accessor. When
* keymetaAccessor is null, L2 lookup is disabled and dynamic lookup is enabled.
- *
- * @param conf The configuration, can't be null.
+ * @param conf The configuration, can't be null.
* @param keymetaAccessor The keymeta accessor, can be null.
*/
public ManagedKeyDataCache(Configuration conf, KeymetaTableAccessor keymetaAccessor) {
@@ -93,35 +88,28 @@ public ManagedKeyDataCache(Configuration conf, KeymetaTableAccessor keymetaAcces
conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
}
- int maxEntries = conf.getInt(
- HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY,
- HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT);
- int activeKeysMaxEntries = conf.getInt(
- HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY,
+ int maxEntries = conf.getInt(HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT);
+ int activeKeysMaxEntries =
+ conf.getInt(HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY,
HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT);
- this.cacheByMetadata = Caffeine.newBuilder()
- .maximumSize(maxEntries)
- .build();
- this.activeKeysCache = Caffeine.newBuilder()
- .maximumSize(activeKeysMaxEntries)
- .build();
+ this.cacheByMetadata = Caffeine.newBuilder().maximumSize(maxEntries).build();
+ this.activeKeysCache = Caffeine.newBuilder().maximumSize(activeKeysMaxEntries).build();
}
/**
* Retrieves an entry from the cache, loading it from L2 if KeymetaTableAccessor is available.
* When L2 is not available, it will try to load from provider, unless dynamic lookup is disabled.
- *
* @param key_cust the key custodian
* @param keyNamespace the key namespace
* @param keyMetadata the key metadata of the entry to be retrieved
- * @param wrappedKey The DEK key material encrypted with the corresponding
- * KEK, if available.
+ * @param wrappedKey The DEK key material encrypted with the corresponding KEK, if available.
* @return the corresponding ManagedKeyData entry, or null if not found
* @throws IOException if an error occurs while loading from KeymetaTableAccessor
* @throws KeyException if an error occurs while loading from KeymetaTableAccessor
*/
public ManagedKeyData getEntry(byte[] key_cust, String keyNamespace, String keyMetadata,
- byte[] wrappedKey) throws IOException, KeyException {
+ byte[] wrappedKey) throws IOException, KeyException {
ManagedKeyData entry = cacheByMetadata.get(keyMetadata, metadata -> {
// First check if it's in the active keys cache
ManagedKeyData keyData = getFromActiveKeysCache(key_cust, keyNamespace, keyMetadata);
@@ -141,8 +129,8 @@ public ManagedKeyData getEntry(byte[] key_cust, String keyNamespace, String keyM
ManagedKeyProvider provider = getKeyProvider();
keyData = provider.unwrapKey(metadata, wrappedKey);
LOG.info("Got key data with status: {} and metadata: {} for prefix: {}",
- keyData.getKeyState(), keyData.getKeyMetadata(),
- ManagedKeyProvider.encodeToStr(key_cust));
+ keyData.getKeyState(), keyData.getKeyMetadata(),
+ ManagedKeyProvider.encodeToStr(key_cust));
// Add to KeymetaTableAccessor for future L2 lookups
if (keymetaAccessor != null) {
try {
@@ -157,19 +145,19 @@ public ManagedKeyData getEntry(byte[] key_cust, String keyNamespace, String keyM
}
if (keyData == null) {
- keyData = new ManagedKeyData(key_cust, keyNamespace, null, ManagedKeyState.FAILED,
- keyMetadata);
+ keyData =
+ new ManagedKeyData(key_cust, keyNamespace, null, ManagedKeyState.FAILED, keyMetadata);
}
// Also update activeKeysCache if relevant and is missing.
if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
activeKeysCache.asMap().putIfAbsent(new ActiveKeysCacheKey(key_cust, keyNamespace),
- keyData);
+ keyData);
}
if (!ManagedKeyState.isUsable(keyData.getKeyState())) {
- LOG.info("Failed to get usable key data with metadata: {} for prefix: {}",
- metadata, ManagedKeyProvider.encodeToStr(key_cust));
+ LOG.info("Failed to get usable key data with metadata: {} for prefix: {}", metadata,
+ ManagedKeyProvider.encodeToStr(key_cust));
}
return keyData;
});
@@ -181,14 +169,13 @@ public ManagedKeyData getEntry(byte[] key_cust, String keyNamespace, String keyM
/**
* Retrieves an existing key from the active keys cache.
- *
* @param key_cust the key custodian
* @param keyNamespace the key namespace
* @param keyMetadata the key metadata
* @return the ManagedKeyData if found, null otherwise
*/
private ManagedKeyData getFromActiveKeysCache(byte[] key_cust, String keyNamespace,
- String keyMetadata) {
+ String keyMetadata) {
ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, keyNamespace);
ManagedKeyData keyData = activeKeysCache.getIfPresent(cacheKey);
if (keyData != null && keyData.getKeyMetadata().equals(keyMetadata)) {
@@ -199,27 +186,24 @@ private ManagedKeyData getFromActiveKeysCache(byte[] key_cust, String keyNamespa
/**
* @return the approximate number of entries in the main cache which is meant for general lookup
- * by key metadata.
+ * by key metadata.
*/
public int getGenericCacheEntryCount() {
return (int) cacheByMetadata.estimatedSize();
}
- /**
- * @return the approximate number of entries in the active keys cache
- */
+ /** Returns the approximate number of entries in the active keys cache */
public int getActiveCacheEntryCount() {
return (int) activeKeysCache.estimatedSize();
}
/**
- * Retrieves the active entry from the cache based on its key custodian and key namespace.
- * This method also loads active keys from provider if not found in cache.
- *
+ * Retrieves the active entry from the cache based on its key custodian and key namespace. This
+ * method also loads active keys from provider if not found in cache.
* @param key_cust The key custodian.
* @param keyNamespace the key namespace to search for
- * @return the ManagedKeyData entry with the given custodian and ACTIVE status, or null if
- * not found
+ * @return the ManagedKeyData entry with the given custodian and ACTIVE status, or null if not
+ * found
*/
public ManagedKeyData getActiveEntry(byte[] key_cust, String keyNamespace) {
ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, keyNamespace);
@@ -233,7 +217,7 @@ public ManagedKeyData getActiveEntry(byte[] key_cust, String keyNamespace) {
retrievedKey = keymetaAccessor.getActiveKey(key_cust, keyNamespace);
} catch (IOException | KeyException | RuntimeException e) {
LOG.warn("Failed to load active key from KeymetaTableAccessor for custodian: {} "
- + "namespace: {}", ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
+ + "namespace: {}", ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
}
}
@@ -245,13 +229,13 @@ public ManagedKeyData getActiveEntry(byte[] key_cust, String keyNamespace) {
retrievedKey = retrieveActiveKey(keyCust, key_cust, keyNamespace, keymetaAccessor, null);
} catch (IOException | KeyException | RuntimeException e) {
LOG.warn("Failed to load active key from provider for custodian: {} namespace: {}",
- ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
+ ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
}
}
if (retrievedKey == null) {
- retrievedKey = new ManagedKeyData(key_cust, keyNamespace, null, ManagedKeyState.FAILED,
- null);
+ retrievedKey =
+ new ManagedKeyData(key_cust, keyNamespace, null, ManagedKeyState.FAILED, null);
}
return retrievedKey;
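A minimal lookup sketch (not part of the patch) for this cache, assuming only a Configuration is at hand; per the constructor Javadoc above, a null accessor disables the L2 table lookup and enables dynamic provider lookup:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
    import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
    import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;

    public class CacheUsageSketch {
      static ManagedKeyData lookupActive(Configuration conf, byte[] custodian) {
        ManagedKeyDataCache cache = new ManagedKeyDataCache(conf, null);
        ManagedKeyData active = cache.getActiveEntry(custodian, ManagedKeyData.KEY_SPACE_GLOBAL);
        // Only hand back keys in a usable state; FAILED placeholders are cached too.
        return active != null && ManagedKeyState.isUsable(active.getKeyState()) ? active : null;
      }
    }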
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
index 5a89d38a0bb2..ecac8e1a2857 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
@@ -25,7 +25,6 @@
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
-
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -50,10 +49,10 @@ public SystemKeyAccessor(Server server) throws IOException {
* Return both the latest system key file and all system key files.
* @return a pair of the latest system key file and all system key files
* @throws IOException if there is an error getting the latest system key file or no cluster key
- * is initialized yet.
+ * is initialized yet.
*/
- public Pair<Path, List<Path>> getLatestSystemKeyFile() throws IOException {
- if (! isKeyManagementEnabled()) {
+ public Pair<Path, List<Path>> getLatestSystemKeyFile() throws IOException {
+ if (!isKeyManagementEnabled()) {
return new Pair<>(null, null);
}
List<Path> allClusterKeyFiles = getAllSystemKeyFiles();
@@ -66,11 +65,10 @@ public Pair<Path, List<Path>> getLatestSystemKeyFile() throws IOException {
}
/**
- * Return all available cluster key files and return them in the order of latest to oldest.
- * If no cluster key files are available, then return an empty list. If key management is not
- * enabled, then return null.
- *
- * @return a list of all available cluster key files
+ * Return all available cluster key files and return them in the order of latest to oldest. If no
+ * cluster key files are available, then return an empty list. If key management is not enabled,
+ * then return null.
+ * @return a list of all available cluster key files
* @throws IOException if there is an error getting the cluster key files
*/
public List<Path> getAllSystemKeyFiles() throws IOException {
@@ -79,8 +77,7 @@ public List<Path> getAllSystemKeyFiles() throws IOException {
}
FileSystem fs = getServer().getFileSystem();
Map<Integer, Path> clusterKeys = new TreeMap<>(Comparator.reverseOrder());
- for (FileStatus st : fs.globStatus(new Path(systemKeyDir,
- SYSTEM_KEY_FILE_PREFIX + "*"))) {
+ for (FileStatus st : fs.globStatus(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))) {
Path keyPath = st.getPath();
int seqNum = extractSystemKeySeqNum(keyPath);
clusterKeys.put(seqNum, keyPath);
@@ -103,8 +100,7 @@ public static int extractSystemKeySeqNum(Path keyPath) throws IOException {
if (keyPath.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
try {
return Integer.parseInt(keyPath.getName().substring(SYSTEM_KEY_FILE_PREFIX.length()));
- }
- catch (NumberFormatException e) {
+ } catch (NumberFormatException e) {
LOG.error("Invalid file name for a cluster key: {}", keyPath, e);
}
}
@@ -122,7 +118,7 @@ public static int extractKeySequence(Path clusterKeyFile) throws IOException {
int keySeq = -1;
if (clusterKeyFile.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
String seqStr = clusterKeyFile.getName().substring(SYSTEM_KEY_FILE_PREFIX.length());
- if (! seqStr.isEmpty()) {
+ if (!seqStr.isEmpty()) {
try {
keySeq = Integer.parseInt(seqStr);
} catch (NumberFormatException e) {
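Both extractSystemKeySeqNum and extractKeySequence above parse the integer suffix that follows the system-key file prefix; the only behavioral difference is how a missing or malformed suffix is reported. A compact, runnable sketch of that parsing contract (the prefix value here is an assumption for illustration, not the real constant):

public class KeySeqSketch {
  static final String PREFIX = "system_key."; // illustrative value only

  // Returns the sequence number, or -1 when the name carries no parsable suffix.
  static int extractSeq(String fileName) {
    if (!fileName.startsWith(PREFIX)) {
      return -1;
    }
    String suffix = fileName.substring(PREFIX.length());
    if (suffix.isEmpty()) {
      return -1;
    }
    try {
      return Integer.parseInt(suffix);
    } catch (NumberFormatException e) {
      return -1; // the real accessor logs invalid names and skips them
    }
  }

  public static void main(String[] args) {
    System.out.println(extractSeq(PREFIX + "7")); // 7
    System.out.println(extractSeq("abcd"));       // -1
  }
}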
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
index d1e3eb048a9b..bb7a6e3f6935 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
@@ -21,7 +21,6 @@
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
-
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.yetus.audience.InterfaceAudience;
@@ -50,7 +49,7 @@ public static SystemKeyCache createCache(SystemKeyAccessor accessor) throws IOEx
}
ManagedKeyData latestSystemKey = null;
Map systemKeys = new TreeMap<>();
- for (Path keyPath: allSystemKeyFiles) {
+ for (Path keyPath : allSystemKeyFiles) {
LOG.info("Loading system key from: {}", keyPath);
ManagedKeyData keyData = accessor.loadSystemKey(keyPath);
if (latestSystemKey == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
index 99a373c8262f..18dfc7d493bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
@@ -21,6 +21,7 @@
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER;
import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER;
import static org.apache.hadoop.hbase.master.MasterWalManager.NON_META_FILTER;
+
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
index 45b021c77feb..2ca423bad8e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
@@ -22,7 +22,6 @@
import java.io.IOException;
import java.util.List;
import java.util.UUID;
-
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
@@ -42,28 +41,25 @@ public SystemKeyManager(MasterServices master) throws IOException {
}
public void ensureSystemKeyInitialized() throws IOException {
- if (! isKeyManagementEnabled()) {
+ if (!isKeyManagementEnabled()) {
return;
}
List<Path> clusterKeys = getAllSystemKeyFiles();
if (clusterKeys.isEmpty()) {
LOG.info("Initializing System Key for the first time");
// Double check for cluster key as another HMaster might have succeeded.
- if (rotateSystemKey(null, clusterKeys) == null &&
- getAllSystemKeyFiles().isEmpty()) {
+ if (rotateSystemKey(null, clusterKeys) == null && getAllSystemKeyFiles().isEmpty()) {
throw new RuntimeException("Failed to generate or save System Key");
}
- }
- else if (rotateSystemKeyIfChanged() != null) {
+ } else if (rotateSystemKeyIfChanged() != null) {
LOG.info("System key has been rotated");
- }
- else {
+ } else {
LOG.info("System key is already initialized and unchanged");
}
}
public ManagedKeyData rotateSystemKeyIfChanged() throws IOException {
- if (! isKeyManagementEnabled()) {
+ if (!isKeyManagementEnabled()) {
return null;
}
Pair<Path, List<Path>> latestFileResult = getLatestSystemKeyFile();
@@ -73,40 +69,42 @@ public ManagedKeyData rotateSystemKeyIfChanged() throws IOException {
}
private ManagedKeyData rotateSystemKey(String currentKeyMetadata, List<Path> allSystemKeyFiles)
- throws IOException {
+ throws IOException {
ManagedKeyProvider provider = getKeyProvider();
- ManagedKeyData clusterKey = provider.getSystemKey(
- master.getMasterFileSystem().getClusterId().toString().getBytes());
+ ManagedKeyData clusterKey =
+ provider.getSystemKey(master.getMasterFileSystem().getClusterId().toString().getBytes());
if (clusterKey == null) {
- throw new IOException("Failed to get system key for cluster id: " +
- master.getMasterFileSystem().getClusterId().toString());
+ throw new IOException("Failed to get system key for cluster id: "
+ + master.getMasterFileSystem().getClusterId().toString());
}
if (clusterKey.getKeyState() != ManagedKeyState.ACTIVE) {
- throw new IOException("System key is expected to be ACTIVE but it is: " +
- clusterKey.getKeyState() + " for metadata: " + clusterKey.getKeyMetadata());
+ throw new IOException("System key is expected to be ACTIVE but it is: "
+ + clusterKey.getKeyState() + " for metadata: " + clusterKey.getKeyMetadata());
}
if (clusterKey.getKeyMetadata() == null) {
throw new IOException("System key is expected to have metadata but it is null");
}
- if (! clusterKey.getKeyMetadata().equals(currentKeyMetadata) &&
- saveLatestSystemKey(clusterKey.getKeyMetadata(), allSystemKeyFiles)) {
+ if (
+ !clusterKey.getKeyMetadata().equals(currentKeyMetadata)
+ && saveLatestSystemKey(clusterKey.getKeyMetadata(), allSystemKeyFiles)
+ ) {
return clusterKey;
}
return null;
}
private boolean saveLatestSystemKey(String keyMetadata, List<Path> allSystemKeyFiles)
- throws IOException {
- int nextSystemKeySeq = (allSystemKeyFiles.isEmpty() ? -1
+ throws IOException {
+ int nextSystemKeySeq = (allSystemKeyFiles.isEmpty()
+ ? -1
: SystemKeyAccessor.extractKeySequence(allSystemKeyFiles.get(0))) + 1;
LOG.info("Trying to save a new cluster key at seq: {}", nextSystemKeySeq);
MasterFileSystem masterFS = master.getMasterFileSystem();
- Path nextSystemKeyPath = new Path(systemKeyDir,
- SYSTEM_KEY_FILE_PREFIX + nextSystemKeySeq);
- Path tempSystemKeyFile = new Path(masterFS.getTempDir(),
- nextSystemKeyPath.getName() + UUID.randomUUID());
- try (FSDataOutputStream fsDataOutputStream = masterFS.getFileSystem()
- .create(tempSystemKeyFile)) {
+ Path nextSystemKeyPath = new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + nextSystemKeySeq);
+ Path tempSystemKeyFile =
+ new Path(masterFS.getTempDir(), nextSystemKeyPath.getName() + UUID.randomUUID());
+ try (
+ FSDataOutputStream fsDataOutputStream = masterFS.getFileSystem().create(tempSystemKeyFile)) {
fsDataOutputStream.writeUTF(keyMetadata);
boolean succeeded = masterFS.getFileSystem().rename(tempSystemKeyFile, nextSystemKeyPath);
if (succeeded) {
@@ -115,8 +113,7 @@ private boolean saveLatestSystemKey(String keyMetadata, List<Path> allSystemKeyF
LOG.error("System key save failed for seq: {}", nextSystemKeySeq);
}
return succeeded;
- }
- finally {
+ } finally {
masterFS.getFileSystem().delete(tempSystemKeyFile, false);
}
}
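saveLatestSystemKey above uses the write-to-temp-then-rename idiom: the key metadata is written to a uniquely named file under the temp directory, then renamed into place, so readers never observe a partially written key file, and a concurrent HMaster that loses the rename race is detected (rename returns false) rather than silently overwriting the winner. A minimal sketch of the same idiom against the Hadoop FileSystem API; the method and variable names here are illustrative:

import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AtomicSaveSketch {
  // Returns true if this writer won the rename; false means another writer got there first.
  static boolean saveAtomically(FileSystem fs, Path tempDir, Path finalPath, String payload)
    throws IOException {
    Path tempFile = new Path(tempDir, finalPath.getName() + UUID.randomUUID());
    try (FSDataOutputStream out = fs.create(tempFile)) {
      out.writeUTF(payload);
    }
    try {
      // On HDFS, rename is atomic and fails if finalPath already exists.
      return fs.rename(tempFile, finalPath);
    } finally {
      fs.delete(tempFile, false); // no-op when the rename already moved the file
    }
  }
}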
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index 65e8aa5e66e2..9bbaf8cd72d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -369,15 +369,18 @@ public ChoreService getChoreService() {
return null;
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
index 2d8ae446da3a..16fadfd81a15 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.keymeta;
import java.security.Key;
-
import org.apache.hadoop.hbase.io.crypto.KeyProvider;
public class DummyKeyProvider implements KeyProvider {
@@ -35,4 +34,4 @@ public Key[] getKeys(String[] aliases) {
public Key getKey(String alias) {
return null;
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
index 8e428c163127..3053e72ecea7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
@@ -19,7 +19,6 @@
import java.io.IOException;
import java.security.Key;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
@@ -89,4 +88,4 @@ public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace)
public void setMockedKey(String alias, java.security.Key key, String keyNamespace) {
delegate.setMockedKey(alias, key, keyNamespace);
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
index a0147e6e4e2e..caccf816c8a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
@@ -29,15 +29,14 @@ public class ManagedKeyTestBase {
@Before
public void setUp() throws Exception {
TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
- MockManagedKeyProvider.class.getName());
+ MockManagedKeyProvider.class.getName());
TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes",
- KeymetaServiceEndpoint.class.getName());
+ KeymetaServiceEndpoint.class.getName());
// Start the minicluster
TEST_UTIL.startMiniCluster(1);
- TEST_UTIL.waitFor(60000,
- () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
+ TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
TEST_UTIL.waitUntilAllRegionsAssigned(KeymetaTableAccessor.KEY_META_TABLE_NAME);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
index ab871b241830..d476e0619ca4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
@@ -35,15 +35,15 @@
@Category({ MasterTests.class, SmallTests.class })
public class TestKeyManagementBase {
@ClassRule
- public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(
- TestKeyManagementBase.class);
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeyManagementBase.class);
@Test
public void testGetKeyProviderWithInvalidProvider() throws Exception {
// Setup configuration with a non-ManagedKeyProvider
Configuration conf = new Configuration();
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
- "org.apache.hadoop.hbase.keymeta.DummyKeyProvider");
+ "org.apache.hadoop.hbase.keymeta.DummyKeyProvider");
Server mockServer = mock(Server.class);
when(mockServer.getConfiguration()).thenReturn(conf);
@@ -63,4 +63,4 @@ public TestKeyManagement(Server server) {
super(server);
}
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
index 7070596a93c0..bc8e14fe4b3d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
@@ -40,7 +40,6 @@
import java.util.Base64;
import java.util.List;
import javax.crypto.spec.SecretKeySpec;
-
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
@@ -96,15 +95,15 @@ public class TestKeymetaEndpoint {
public void setUp() throws Exception {
MockitoAnnotations.initMocks(this);
keymetaServiceEndpoint = new KeymetaServiceEndpoint();
- CoprocessorEnvironment env = mock(CoprocessorEnvironment.class,
- withSettings().extraInterfaces(HasMasterServices.class));
+ CoprocessorEnvironment env =
+ mock(CoprocessorEnvironment.class, withSettings().extraInterfaces(HasMasterServices.class));
when(((HasMasterServices) env).getMasterServices()).thenReturn(master);
keymetaServiceEndpoint.start(env);
- keyMetaAdminService = (KeymetaAdminServiceImpl) keymetaServiceEndpoint.getServices()
- .iterator().next();
+ keyMetaAdminService =
+ (KeymetaAdminServiceImpl) keymetaServiceEndpoint.getServices().iterator().next();
responseBuilder = ManagedKeysResponse.newBuilder().setKeyState(KEY_ACTIVE);
- requestBuilder = ManagedKeysRequest.newBuilder()
- .setKeyNamespace(ManagedKeyData.KEY_SPACE_GLOBAL);
+ requestBuilder =
+ ManagedKeysRequest.newBuilder().setKeyNamespace(ManagedKeyData.KEY_SPACE_GLOBAL);
keyData1 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE,
new SecretKeySpec("key1".getBytes(), "AES"), ACTIVE, KEY_METADATA1);
keyData2 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE,
@@ -136,8 +135,8 @@ public void testConvertToKeyCustBytesInvalid() {
ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build();
// Act
- byte[] result = KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request,
- responseBuilder);
+ byte[] result =
+ KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request, responseBuilder);
// Assert
assertNull(result);
@@ -150,13 +149,12 @@ public void testGetResponseBuilder() {
// Arrange
String keyCust = Base64.getEncoder().encodeToString("testKey".getBytes());
String keyNamespace = "testNamespace";
- ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust)
- .setKeyNamespace(keyNamespace)
- .build();
+ ManagedKeysRequest request =
+ requestBuilder.setKeyCust(keyCust).setKeyNamespace(keyNamespace).build();
// Act
- ManagedKeysResponse.Builder result = KeymetaServiceEndpoint.getResponseBuilder(controller,
- request);
+ ManagedKeysResponse.Builder result =
+ KeymetaServiceEndpoint.getResponseBuilder(controller, request);
// Assert
assertNotNull(result);
@@ -170,13 +168,12 @@ public void testGetResponseBuilderWithInvalidBase64() {
// Arrange
String keyCust = "invalidBase64!";
String keyNamespace = "testNamespace";
- ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust)
- .setKeyNamespace(keyNamespace)
- .build();
+ ManagedKeysRequest request =
+ requestBuilder.setKeyCust(keyCust).setKeyNamespace(keyNamespace).build();
// Act
- ManagedKeysResponse.Builder result = KeymetaServiceEndpoint.getResponseBuilder(controller,
- request);
+ ManagedKeysResponse.Builder result =
+ KeymetaServiceEndpoint.getResponseBuilder(controller, request);
// Assert
assertNotNull(result);
@@ -188,15 +185,14 @@ public void testGetResponseBuilderWithInvalidBase64() {
@Test
public void testGenerateKeyStateResponse() throws Exception {
// Arrange
- ManagedKeysResponse response = responseBuilder.setKeyCustBytes(ByteString.copyFrom(
- keyData1.getKeyCustodian()))
- .setKeyNamespace(keyData1.getKeyNamespace())
- .build();
+ ManagedKeysResponse response =
+ responseBuilder.setKeyCustBytes(ByteString.copyFrom(keyData1.getKeyCustodian()))
+ .setKeyNamespace(keyData1.getKeyNamespace()).build();
List<ManagedKeyData> managedKeyStates = Arrays.asList(keyData1, keyData2);
// Act
- GetManagedKeysResponse result = KeymetaServiceEndpoint.generateKeyStateResponse(
- managedKeyStates, responseBuilder);
+ GetManagedKeysResponse result =
+ KeymetaServiceEndpoint.generateKeyStateResponse(managedKeyStates, responseBuilder);
// Assert
assertNotNull(response);
@@ -212,15 +208,14 @@ public void testGenerateKeyStateResponse() throws Exception {
@Test
public void testGenerateKeyStateResponse_Empty() throws Exception {
// Arrange
- ManagedKeysResponse response = responseBuilder.setKeyCustBytes(ByteString.copyFrom(
- keyData1.getKeyCustodian()))
- .setKeyNamespace(keyData1.getKeyNamespace())
- .build();
+ ManagedKeysResponse response =
+ responseBuilder.setKeyCustBytes(ByteString.copyFrom(keyData1.getKeyCustodian()))
+ .setKeyNamespace(keyData1.getKeyNamespace()).build();
List<ManagedKeyData> managedKeyStates = new ArrayList<>();
// Act
- GetManagedKeysResponse result = KeymetaServiceEndpoint.generateKeyStateResponse(
- managedKeyStates, responseBuilder);
+ GetManagedKeysResponse result =
+ KeymetaServiceEndpoint.generateKeyStateResponse(managedKeyStates, responseBuilder);
// Assert
assertNotNull(response);
@@ -231,16 +226,14 @@ public void testGenerateKeyStateResponse_Empty() throws Exception {
@Test
public void testGenerateKeyStatResponse_Success() throws Exception {
- doTestServiceCallForSuccess(
- (controller, request, done) ->
- keyMetaAdminService.enableKeyManagement(controller, request, done));
+ doTestServiceCallForSuccess((controller, request, done) -> keyMetaAdminService
+ .enableKeyManagement(controller, request, done));
}
@Test
public void testGetManagedKeys_Success() throws Exception {
doTestServiceCallForSuccess(
- (controller, request, done) ->
- keyMetaAdminService.getManagedKeys(controller, request, done));
+ (controller, request, done) -> keyMetaAdminService.getManagedKeys(controller, request, done));
}
private void doTestServiceCallForSuccess(ServiceCall svc) throws Exception {
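The endpoint tests above all revolve around one convention: the key custodian travels over RPC as a base64 string (the tests build valid input with java.util.Base64 and expect a null result for input like "invalidBase64!"). A stand-alone sketch of that decode step; the helper name is illustrative, and the null-on-failure contract mirrors what the tests assert, while the real endpoint additionally records the error on the RpcController:

import java.util.Base64;

public class CustodianDecodeSketch {
  // Returns the decoded custodian bytes, or null for invalid base64.
  static byte[] decodeCustodian(String encodedCust) {
    try {
      return Base64.getDecoder().decode(encodedCust);
    } catch (IllegalArgumentException e) {
      return null; // the real endpoint also flags the failure on the RpcController
    }
  }

  public static void main(String[] args) {
    String valid = Base64.getEncoder().encodeToString("testKey".getBytes());
    System.out.println(decodeCustodian(valid).length);     // 7
    System.out.println(decodeCustodian("invalidBase64!")); // null
  }
}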
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java
index f34d482d7940..9ccb3dc2568f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java
@@ -24,7 +24,6 @@
import static org.mockito.Mockito.when;
import java.io.IOException;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
@@ -197,4 +196,4 @@ public void testMultipleInitCalls() throws Exception {
verify(mockTableDescriptors, times(3)).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
verify(mockMaster, never()).createSystemTable(any());
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
index 3b3c4c23dc7d..12b74e1c3bcc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
@@ -52,7 +52,6 @@
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -89,11 +88,9 @@
import org.mockito.MockitoAnnotations;
@RunWith(Suite.class)
-@Suite.SuiteClasses({
- TestKeymetaTableAccessor.TestAdd.class,
+@Suite.SuiteClasses({ TestKeymetaTableAccessor.TestAdd.class,
TestKeymetaTableAccessor.TestAddWithNullableFields.class,
- TestKeymetaTableAccessor.TestGet.class,
-})
+ TestKeymetaTableAccessor.TestGet.class, })
@Category({ MasterTests.class, SmallTests.class })
public class TestKeymetaTableAccessor {
protected static final String ALIAS = "custId1";
@@ -149,23 +146,20 @@ public void tearDown() throws Exception {
@Category({ MasterTests.class, SmallTests.class })
public static class TestAdd extends TestKeymetaTableAccessor {
@ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestAdd.class);
+ public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdd.class);
@Parameter(0)
public ManagedKeyState keyState;
@Parameterized.Parameters(name = "{index},keyState={0}")
public static Collection<Object[]> data() {
- return Arrays.asList(
- new Object[][] { { ACTIVE }, { FAILED }, { INACTIVE }, { DISABLED }, });
+ return Arrays.asList(new Object[][] { { ACTIVE }, { FAILED }, { INACTIVE }, { DISABLED }, });
}
@Test
public void testAddKey() throws Exception {
managedKeyProvider.setMockedKeyState(ALIAS, keyState);
- ManagedKeyData keyData =
- managedKeyProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
accessor.addKey(keyData);
@@ -176,8 +170,7 @@ public void testAddKey() throws Exception {
if (keyState == ACTIVE) {
assertPut(keyData, puts.get(0), constructRowKeyForCustNamespace(keyData));
assertPut(keyData, puts.get(1), constructRowKeyForMetadata(keyData));
- }
- else {
+ } else {
assertPut(keyData, puts.get(0), constructRowKeyForMetadata(keyData));
}
}
@@ -204,8 +197,8 @@ public void testAddKeyWithFailedStateAndNullMetadata() throws Exception {
Put put = puts.get(0);
// Verify the row key uses state value for metadata hash
- byte[] expectedRowKey = constructRowKeyForMetadata(CUST_ID, KEY_SPACE_GLOBAL,
- new byte[] { FAILED.getVal() });
+ byte[] expectedRowKey =
+ constructRowKeyForMetadata(CUST_ID, KEY_SPACE_GLOBAL, new byte[] { FAILED.getVal() });
assertEquals(0, Bytes.compareTo(expectedRowKey, put.getRow()));
Map<Bytes, Bytes> valueMap = getValueMap(put);
@@ -225,8 +218,7 @@ public void testAddKeyWithFailedStateAndNullMetadata() throws Exception {
@Category({ MasterTests.class, SmallTests.class })
public static class TestGet extends TestKeymetaTableAccessor {
@ClassRule
- public static final HBaseClassTestRule CLASS_RULE =
- HBaseClassTestRule.forClass(TestGet.class);
+ public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGet.class);
@Mock
private Result result1;
@@ -275,8 +267,8 @@ public void testGetActiveKeyMissingWrappedKey() throws Exception {
ex = assertThrows(IOException.class,
() -> accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, KEY_METADATA));
assertEquals("ACTIVE key must have a wrapped key", ex.getMessage());
- ex = assertThrows(IOException.class, () ->
- accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, KEY_METADATA));
+ ex = assertThrows(IOException.class,
+ () -> accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, KEY_METADATA));
assertEquals("INACTIVE key must have a wrapped key", ex.getMessage());
}
@@ -303,8 +295,8 @@ public void testGetKeyWithWrappedKey() throws Exception {
assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian()));
assertEquals(KEY_NAMESPACE, result.getKeyNamespace());
assertEquals(keyData.getKeyMetadata(), result.getKeyMetadata());
- assertEquals(0, Bytes.compareTo(keyData.getTheKey().getEncoded(),
- result.getTheKey().getEncoded()));
+ assertEquals(0,
+ Bytes.compareTo(keyData.getTheKey().getEncoded(), result.getTheKey().getEncoded()));
assertEquals(ACTIVE, result.getKeyState());
// When DEK checksum doesn't match, we expect a null value.
@@ -384,8 +376,8 @@ public void testGetActiveKey() throws Exception {
private ManagedKeyData setupActiveKey(byte[] custId, Result result) throws Exception {
ManagedKeyData keyData = managedKeyProvider.getManagedKey(custId, KEY_NAMESPACE);
- byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(conf, null,
- keyData.getTheKey(), latestSystemKey.getTheKey());
+ byte[] dekWrappedBySTK =
+ EncryptionUtil.wrapKey(conf, null, keyData.getTheKey(), latestSystemKey.getTheKey());
when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_WRAPPED_BY_STK_QUAL_BYTES)))
.thenReturn(dekWrappedBySTK);
when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_CHECKSUM_QUAL_BYTES)))
@@ -410,8 +402,7 @@ protected void assertPut(ManagedKeyData keyData, Put put, byte[] rowKey) {
assertNotNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES)));
assertEquals(new Bytes(Bytes.toBytes(latestSystemKey.getKeyChecksum())),
valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES)));
- }
- else {
+ } else {
assertNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES)));
assertNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES)));
assertNull(valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES)));
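The assertions above encode the keymeta table's row-key scheme: an ACTIVE key is written under two rows, one keyed by custodian plus namespace (so the active key can be found without knowing its metadata) and one keyed by custodian, namespace, and a metadata hash (falling back to the state byte when metadata is null); non-active keys get only the metadata row. The helper names constructRowKeyForCustNamespace and constructRowKeyForMetadata come from the tests; the sketch below shows one plausible byte layout, where the separator byte and the caller-supplied hash are assumptions for illustration:

import java.nio.charset.StandardCharsets;

public class RowKeySketch {
  private static final byte SEP = 0x00; // assumed separator byte

  static byte[] rowKeyForCustNamespace(byte[] cust, String namespace) {
    byte[] ns = namespace.getBytes(StandardCharsets.UTF_8);
    byte[] row = new byte[cust.length + 1 + ns.length];
    System.arraycopy(cust, 0, row, 0, cust.length);
    row[cust.length] = SEP;
    System.arraycopy(ns, 0, row, cust.length + 1, ns.length);
    return row;
  }

  static byte[] rowKeyForMetadata(byte[] cust, String namespace, byte[] metadataHash) {
    byte[] prefix = rowKeyForCustNamespace(cust, namespace);
    byte[] row = new byte[prefix.length + 1 + metadataHash.length];
    System.arraycopy(prefix, 0, row, 0, prefix.length);
    row[prefix.length] = SEP;
    System.arraycopy(metadataHash, 0, row, prefix.length + 1, metadataHash.length);
    return row;
  }
}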
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
index f7afa7ee5891..61678e316ceb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
@@ -18,16 +18,15 @@
package org.apache.hadoop.hbase.keymeta;
import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
-import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED;
import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.clearInvocations;
import static org.mockito.Mockito.doReturn;
@@ -43,7 +42,6 @@
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.stream.Collectors;
-
import net.bytebuddy.ByteBuddy;
import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
import net.bytebuddy.implementation.MethodDelegation;
@@ -51,18 +49,6 @@
import net.bytebuddy.implementation.bind.annotation.Origin;
import net.bytebuddy.implementation.bind.annotation.RuntimeType;
import net.bytebuddy.matcher.ElementMatchers;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.BlockJUnit4ClassRunner;
-import org.junit.runners.Suite;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.mockito.Spy;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -74,14 +60,23 @@
import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Suite;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
@RunWith(Suite.class)
-@Suite.SuiteClasses({
- TestManagedKeyDataCache.TestGeneric.class,
- TestManagedKeyDataCache.TestWithoutL2Cache.class,
- TestManagedKeyDataCache.TestWithL2CacheAndNoDynamicLookup.class,
- TestManagedKeyDataCache.TestWithL2CacheAndDynamicLookup.class,
-})
+@Suite.SuiteClasses({ TestManagedKeyDataCache.TestGeneric.class,
+ TestManagedKeyDataCache.TestWithoutL2Cache.class,
+ TestManagedKeyDataCache.TestWithL2CacheAndNoDynamicLookup.class,
+ TestManagedKeyDataCache.TestWithL2CacheAndDynamicLookup.class, })
@Category({ MasterTests.class, SmallTests.class })
public class TestManagedKeyDataCache {
private static final String ALIAS = "cust1";
@@ -119,16 +114,14 @@ public static synchronized void setUpInterceptor() {
if (providerClass != null) {
return;
}
- providerClass = new ByteBuddy()
- .subclass(MockManagedKeyProvider.class)
- .name("org.apache.hadoop.hbase.io.crypto.MockManagedKeyProviderSpy")
- .method(ElementMatchers.any()) // Intercept all methods
- // Using a delegator instead of directly forwarding to testProvider to
- // facilitate switching the testProvider instance. Besides, it
- .intercept(MethodDelegation.to(new ForwardingInterceptor()))
- .make()
- .load(MockManagedKeyProvider.class.getClassLoader(), ClassLoadingStrategy.Default.INJECTION)
- .getLoaded();
+ providerClass = new ByteBuddy().subclass(MockManagedKeyProvider.class)
+ .name("org.apache.hadoop.hbase.io.crypto.MockManagedKeyProviderSpy")
+ .method(ElementMatchers.any()) // Intercept all methods
+ // Using a delegator instead of directly forwarding to testProvider to
+ // facilitate switching the testProvider instance. Besides, it
+ .intercept(MethodDelegation.to(new ForwardingInterceptor())).make()
+ .load(MockManagedKeyProvider.class.getClassLoader(), ClassLoadingStrategy.Default.INJECTION)
+ .getLoaded();
}
@Before
@@ -162,33 +155,33 @@ public void testEmptyCache() throws Exception {
@Test
public void testActiveKeysCacheKeyEqualsAndHashCode() {
- byte[] custodian1 = new byte[] {1, 2, 3};
- byte[] custodian2 = new byte[] {1, 2, 3};
- byte[] custodian3 = new byte[] {4, 5, 6};
+ byte[] custodian1 = new byte[] { 1, 2, 3 };
+ byte[] custodian2 = new byte[] { 1, 2, 3 };
+ byte[] custodian3 = new byte[] { 4, 5, 6 };
String namespace1 = "ns1";
String namespace2 = "ns2";
// Reflexive
ManagedKeyDataCache.ActiveKeysCacheKey key1 =
- new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace1);
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace1);
assertTrue(key1.equals(key1));
// Symmetric and consistent for equal content
ManagedKeyDataCache.ActiveKeysCacheKey key2 =
- new ManagedKeyDataCache.ActiveKeysCacheKey(custodian2, namespace1);
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian2, namespace1);
assertTrue(key1.equals(key2));
assertTrue(key2.equals(key1));
assertEquals(key1.hashCode(), key2.hashCode());
// Different custodian
ManagedKeyDataCache.ActiveKeysCacheKey key3 =
- new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace1);
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace1);
assertFalse(key1.equals(key3));
assertFalse(key3.equals(key1));
// Different namespace
ManagedKeyDataCache.ActiveKeysCacheKey key4 =
- new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace2);
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace2);
assertFalse(key1.equals(key4));
assertFalse(key4.equals(key1));
@@ -198,7 +191,7 @@ public void testActiveKeysCacheKeyEqualsAndHashCode() {
// Both fields different
ManagedKeyDataCache.ActiveKeysCacheKey key5 =
- new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace2);
+ new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace2);
assertFalse(key1.equals(key5));
assertFalse(key5.equals(key1));
}
@@ -227,7 +220,7 @@ public void testGenericCacheForNonExistentKey() throws Exception {
public void testWithInvalidProvider() throws Exception {
ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
doThrow(new IOException("Test exception")).when(testProvider).unwrapKey(any(String.class),
- any());
+ any());
assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null));
verify(testProvider).unwrapKey(any(String.class), any());
// A second call to getEntry should not result in a call to the provider due to -ve entry.
@@ -235,7 +228,7 @@ public void testWithInvalidProvider() throws Exception {
verify(testProvider, never()).unwrapKey(any(String.class), any());
assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null));
doThrow(new IOException("Test exception")).when(testProvider).getManagedKey(any(),
- any(String.class));
+ any(String.class));
assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
verify(testProvider).getManagedKey(any(), any(String.class));
// A second call to getRandomEntry should not result in a call to the provider due to -ve
@@ -248,18 +241,18 @@ public void testWithInvalidProvider() throws Exception {
@Test
public void testGenericCache() throws Exception {
ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
- assertEquals(globalKey1, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL,
- globalKey1.getKeyMetadata(), null));
+ assertEquals(globalKey1,
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null));
verify(testProvider).getManagedKey(any(), any(String.class));
clearInvocations(testProvider);
ManagedKeyData globalKey2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
- assertEquals(globalKey2, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL,
- globalKey2.getKeyMetadata(), null));
+ assertEquals(globalKey2,
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey2.getKeyMetadata(), null));
verify(testProvider).getManagedKey(any(), any(String.class));
clearInvocations(testProvider);
ManagedKeyData globalKey3 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
- assertEquals(globalKey3, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL,
- globalKey3.getKeyMetadata(), null));
+ assertEquals(globalKey3,
+ cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey3.getKeyMetadata(), null));
verify(testProvider).getManagedKey(any(), any(String.class));
}
@@ -281,8 +274,7 @@ public void testGenericCacheOperations() throws Exception {
assertGenericCacheEntries(nsKey1, globalKey1);
ManagedKeyData globalKey2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
assertGenericCacheEntries(globalKey2, nsKey1, globalKey1);
- ManagedKeyData nsKey2 = testProvider.getManagedKey(CUST_ID,
- "namespace1");
+ ManagedKeyData nsKey2 = testProvider.getManagedKey(CUST_ID, "namespace1");
assertGenericCacheEntries(nsKey2, globalKey2, nsKey1, globalKey1);
}
@@ -408,8 +400,7 @@ public void testGenericCacheNonExistentKeyInL2Cache() throws Exception {
@Test
public void testGenericCacheRetrievalFromL2Cache() throws Exception {
ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
- when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata()))
- .thenReturn(key);
+ when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata())).thenReturn(key);
assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null));
verify(mockL2).getKey(any(), any(String.class), any(String.class));
}
@@ -426,8 +417,7 @@ public void testActiveKeysCacheNonExistentKeyInL2Cache() throws Exception {
@Test
public void testActiveKeysCacheRetrievalFromL2Cache() throws Exception {
ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
- when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL))
- .thenReturn(key);
+ when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL)).thenReturn(key);
assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
verify(mockL2).getActiveKey(any(), any(String.class));
}
@@ -461,8 +451,7 @@ public void testActiveKeysCacheUsesKeymetaAccessorWhenGenericCacheEmpty() throws
// Mock the keymetaAccessor to return a key
ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL);
- when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL))
- .thenReturn(key);
+ when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL)).thenReturn(key);
// Get the active entry - it should call keymetaAccessor since generic cache is empty
assertEquals(key, cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
@@ -505,7 +494,8 @@ public void testAddKeyFailure() throws Exception {
@Test
public void testGenericCacheDynamicLookupUnexpectedException() throws Exception {
- doThrow(new RuntimeException("Test exception")).when(testProvider).unwrapKey(any(String.class), any());
+ doThrow(new RuntimeException("Test exception")).when(testProvider)
+ .unwrapKey(any(String.class), any());
assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
verify(mockL2).getKey(any(), any(String.class), any(String.class));
@@ -515,7 +505,7 @@ public void testGenericCacheDynamicLookupUnexpectedException() throws Exception
@Test
public void testActiveKeysCacheDynamicLookupWithUnexpectedException() throws Exception {
doThrow(new RuntimeException("Test exception")).when(testProvider).getManagedKey(any(),
- any(String.class));
+ any(String.class));
assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
verify(testProvider).getManagedKey(any(), any(String.class));
clearInvocations(testProvider);
@@ -586,17 +576,16 @@ public void testThatActiveKeysCache_PopulatedByGenericCache() throws Exception {
}
protected void assertGenericCacheEntries(ManagedKeyData... keys) throws Exception {
- for (ManagedKeyData key: keys) {
- assertEquals(key, cache.getEntry(key.getKeyCustodian(), key.getKeyNamespace(),
- key.getKeyMetadata(), null));
+ for (ManagedKeyData key : keys) {
+ assertEquals(key,
+ cache.getEntry(key.getKeyCustodian(), key.getKeyNamespace(), key.getKeyMetadata(), null));
}
assertEquals(keys.length, cache.getGenericCacheEntryCount());
- int activeKeysCount = Arrays.stream(keys)
- .filter(key -> key.getKeyState() == ManagedKeyState.ACTIVE)
- .map(key -> new ManagedKeyDataCache.ActiveKeysCacheKey(key.getKeyCustodian(),
- key.getKeyNamespace()))
- .collect(Collectors.toSet())
- .size();
+ int activeKeysCount =
+ Arrays.stream(keys).filter(key -> key.getKeyState() == ManagedKeyState.ACTIVE)
+ .map(key -> new ManagedKeyDataCache.ActiveKeysCacheKey(key.getKeyCustodian(),
+ key.getKeyNamespace()))
+ .collect(Collectors.toSet()).size();
assertEquals(activeKeysCount, cache.getActiveCacheEntryCount());
}
}
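The setUpInterceptor hunk above builds, once per JVM, a ByteBuddy subclass of MockManagedKeyProvider whose methods all route through a ForwardingInterceptor, so individual tests can swap the underlying provider instance (including a Mockito spy) without generating a new class. The sketch below shows the same pattern end to end with illustrative Target and Forwarder types; the ByteBuddy call chain matches the one in the hunk, and @RuntimeType, @Origin, and @AllArguments are ByteBuddy's standard delegation annotations:

import java.lang.reflect.Method;
import net.bytebuddy.ByteBuddy;
import net.bytebuddy.dynamic.loading.ClassLoadingStrategy;
import net.bytebuddy.implementation.MethodDelegation;
import net.bytebuddy.implementation.bind.annotation.AllArguments;
import net.bytebuddy.implementation.bind.annotation.Origin;
import net.bytebuddy.implementation.bind.annotation.RuntimeType;
import net.bytebuddy.matcher.ElementMatchers;

public class ForwardingSketch {
  public static class Target {
    public String greet(String name) { return "hello " + name; }
  }

  public static class Forwarder {
    volatile Object target; // swap this field to redirect every intercepted call

    @RuntimeType
    public Object intercept(@Origin Method method, @AllArguments Object[] args) throws Exception {
      return method.invoke(target, args); // reflective forward to the current target
    }
  }

  public static void main(String[] args) throws Exception {
    Forwarder forwarder = new Forwarder();
    forwarder.target = new Target();
    Class<? extends Target> spyClass = new ByteBuddy().subclass(Target.class)
      .method(ElementMatchers.isDeclaredBy(Target.class)) // intercept only Target's methods
      .intercept(MethodDelegation.to(forwarder))
      .make()
      .load(Target.class.getClassLoader(), ClassLoadingStrategy.Default.INJECTION)
      .getLoaded();
    System.out.println(spyClass.getDeclaredConstructor().newInstance().greet("hbase"));
  }
}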
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
index 1ffed4707475..52659b6cf2a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
@@ -29,7 +29,6 @@
import java.lang.reflect.Field;
import java.security.KeyException;
import java.util.List;
-
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
@@ -68,8 +67,8 @@ public void testEnableOverRPC() throws Exception {
private void doTestEnable(KeymetaAdmin adminClient) throws IOException, KeyException {
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
- MockManagedKeyProvider managedKeyProvider = (MockManagedKeyProvider)
- Encryption.getKeyProvider(master.getConfiguration());
+ MockManagedKeyProvider managedKeyProvider =
+ (MockManagedKeyProvider) Encryption.getKeyProvider(master.getConfiguration());
String cust = "cust1";
String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes());
List<ManagedKeyData> managedKeyStates =
@@ -79,26 +78,24 @@ private void doTestEnable(KeymetaAdmin adminClient) throws IOException, KeyExcep
List<ManagedKeyData> managedKeys =
adminClient.getManagedKeys(encodedCust, ManagedKeyData.KEY_SPACE_GLOBAL);
assertEquals(1, managedKeys.size());
- assertEquals(managedKeyProvider.getLastGeneratedKeyData(cust,
- ManagedKeyData.KEY_SPACE_GLOBAL).cloneWithoutKey(), managedKeys.get(0).cloneWithoutKey());
+ assertEquals(managedKeyProvider.getLastGeneratedKeyData(cust, ManagedKeyData.KEY_SPACE_GLOBAL)
+ .cloneWithoutKey(), managedKeys.get(0).cloneWithoutKey());
String nonExistentCust = "nonExistentCust";
managedKeyProvider.setMockedKeyState(nonExistentCust, ManagedKeyState.FAILED);
- List<ManagedKeyData> keyDataList1 =
- adminClient.enableKeyManagement(ManagedKeyProvider.encodeToStr(nonExistentCust.getBytes()),
- ManagedKeyData.KEY_SPACE_GLOBAL);
+ List<ManagedKeyData> keyDataList1 = adminClient.enableKeyManagement(
+ ManagedKeyProvider.encodeToStr(nonExistentCust.getBytes()), ManagedKeyData.KEY_SPACE_GLOBAL);
assertKeyDataListSingleKey(keyDataList1, ManagedKeyState.FAILED);
String disabledCust = "disabledCust";
managedKeyProvider.setMockedKeyState(disabledCust, ManagedKeyState.DISABLED);
- List<ManagedKeyData> keyDataList2 =
- adminClient.enableKeyManagement(ManagedKeyProvider.encodeToStr(disabledCust.getBytes()),
- ManagedKeyData.KEY_SPACE_GLOBAL);
+ List<ManagedKeyData> keyDataList2 = adminClient.enableKeyManagement(
+ ManagedKeyProvider.encodeToStr(disabledCust.getBytes()), ManagedKeyData.KEY_SPACE_GLOBAL);
assertKeyDataListSingleKey(keyDataList2, ManagedKeyState.DISABLED);
}
private static void assertKeyDataListSingleKey(List<ManagedKeyData> managedKeyStates,
- ManagedKeyState keyState) {
+ ManagedKeyState keyState) {
assertNotNull(managedKeyStates);
assertEquals(1, managedKeyStates.size());
assertEquals(keyState, managedKeyStates.get(0).getKeyState());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
index a92818f8aada..9882b823da8c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
@@ -33,7 +33,6 @@
import java.util.Collections;
import java.util.List;
import javax.crypto.spec.SecretKeySpec;
-
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
@@ -86,12 +85,12 @@ public void setUp() {
testKey3 = new SecretKeySpec("test-key-3-bytes".getBytes(), "AES");
// Create test key data with different checksums
- keyData1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey1,
- ManagedKeyState.ACTIVE, TEST_METADATA_1, 1000L);
- keyData2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey2,
- ManagedKeyState.ACTIVE, TEST_METADATA_2, 2000L);
- keyData3 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey3,
- ManagedKeyState.ACTIVE, TEST_METADATA_3, 3000L);
+ keyData1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey1, ManagedKeyState.ACTIVE,
+ TEST_METADATA_1, 1000L);
+ keyData2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey2, ManagedKeyState.ACTIVE,
+ TEST_METADATA_2, 2000L);
+ keyData3 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey3, ManagedKeyState.ACTIVE,
+ TEST_METADATA_3, 3000L);
// Create test paths
keyPath1 = new Path("/system/keys/key1");
@@ -272,10 +271,10 @@ public void testCacheWithKeysHavingSameChecksum() throws Exception {
Key sameKey1 = new SecretKeySpec("identical-bytes".getBytes(), "AES");
Key sameKey2 = new SecretKeySpec("identical-bytes".getBytes(), "AES");
- ManagedKeyData sameManagedKey1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE,
- sameKey1, ManagedKeyState.ACTIVE, "metadata-A", 1000L);
- ManagedKeyData sameManagedKey2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE,
- sameKey2, ManagedKeyState.ACTIVE, "metadata-B", 2000L);
+ ManagedKeyData sameManagedKey1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, sameKey1,
+ ManagedKeyState.ACTIVE, "metadata-A", 1000L);
+ ManagedKeyData sameManagedKey2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, sameKey2,
+ ManagedKeyState.ACTIVE, "metadata-B", 2000L);
// Verify they have the same checksum
assertEquals(sameManagedKey1.getKeyChecksum(), sameManagedKey2.getKeyChecksum());
@@ -307,4 +306,4 @@ public void testCreateCacheWithUnexpectedNullKeyData() throws Exception {
});
assertTrue(ex.getMessage().equals("Key load error"));
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index eaa4c2255277..6af4dcec1ad2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -119,15 +119,18 @@ public ChoreService getChoreService() {
return null;
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index b63bbbaac8be..ba3387b8ad1d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -559,15 +559,18 @@ public ChoreService getChoreService() {
return null;
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index ed11d69420ac..02d76a9af3af 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -331,15 +331,18 @@ public ActiveMasterManager getActiveMasterManager() {
return activeMasterManager;
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
index 9304029aedf7..08ffef8e0e9f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
@@ -27,9 +27,7 @@
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assume.assumeTrue;
-import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -39,7 +37,6 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -73,8 +70,7 @@
@RunWith(Suite.class)
@Suite.SuiteClasses({ TestKeymetaAdminImpl.TestWhenDisabled.class,
TestKeymetaAdminImpl.TestAdminImpl.class,
- TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class,
-})
+ TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class, })
@Category({ MasterTests.class, SmallTests.class })
public class TestKeymetaAdminImpl {
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
@@ -82,7 +78,6 @@ public class TestKeymetaAdminImpl {
private static final String CUST = "cust1";
private static final String ENCODED_CUST = ManagedKeyProvider.encodeToStr(CUST.getBytes());
-
@Rule
public TestName name = new TestName();
@@ -124,12 +119,10 @@ public void setUp() throws Exception {
@Test
public void testDisabled() throws Exception {
+ assertThrows(IOException.class, () -> keymetaAdmin
+ .enableKeyManagement(ManagedKeyData.KEY_GLOBAL_CUSTODIAN, KEY_SPACE_GLOBAL));
assertThrows(IOException.class,
- () -> keymetaAdmin.enableKeyManagement(ManagedKeyData.KEY_GLOBAL_CUSTODIAN,
- KEY_SPACE_GLOBAL));
- assertThrows(IOException.class,
- () -> keymetaAdmin.getManagedKeys(ManagedKeyData.KEY_GLOBAL_CUSTODIAN,
- KEY_SPACE_GLOBAL));
+ () -> keymetaAdmin.getManagedKeys(ManagedKeyData.KEY_GLOBAL_CUSTODIAN, KEY_SPACE_GLOBAL));
}
}
@@ -149,14 +142,9 @@ public static class TestAdminImpl extends TestKeymetaAdminImpl {
@Parameters(name = "{index},keySpace={1},keyState={2}")
public static Collection<Object[]> data() {
- return Arrays.asList(
- new Object[][] {
- { KEY_SPACE_GLOBAL, ACTIVE, false },
- { "ns1", ACTIVE, false },
- { KEY_SPACE_GLOBAL, FAILED, true },
- { KEY_SPACE_GLOBAL, INACTIVE, false },
- { KEY_SPACE_GLOBAL, DISABLED, true },
- });
+ return Arrays.asList(new Object[][] { { KEY_SPACE_GLOBAL, ACTIVE, false },
+ { "ns1", ACTIVE, false }, { KEY_SPACE_GLOBAL, FAILED, true },
+ { KEY_SPACE_GLOBAL, INACTIVE, false }, { KEY_SPACE_GLOBAL, DISABLED, true }, });
}
@Test
@@ -164,11 +152,10 @@ public void testEnableAndGet() throws Exception {
MockManagedKeyProvider managedKeyProvider =
(MockManagedKeyProvider) Encryption.getKeyProvider(conf);
managedKeyProvider.setMockedKeyState(CUST, keyState);
- when(keymetaAccessor.getActiveKey(CUST.getBytes(), keySpace)).thenReturn(
- managedKeyProvider.getManagedKey(CUST.getBytes(), keySpace));
+ when(keymetaAccessor.getActiveKey(CUST.getBytes(), keySpace))
+ .thenReturn(managedKeyProvider.getManagedKey(CUST.getBytes(), keySpace));
- List<ManagedKeyData> managedKeys =
- keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace);
+ List<ManagedKeyData> managedKeys = keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace);
assertNotNull(managedKeys);
assertEquals(1, managedKeys.size());
assertEquals(keyState, managedKeys.get(0).getKeyState());
@@ -217,11 +204,7 @@ public static class TestForKeyProviderNullReturn extends TestKeymetaAdminImpl {
@Parameters(name = "{index},keySpace={0}")
public static Collection<Object[]> data() {
- return Arrays.asList(
- new Object[][] {
- { KEY_SPACE_GLOBAL },
- { "ns1" },
- });
+ return Arrays.asList(new Object[][] { { KEY_SPACE_GLOBAL }, { "ns1" }, });
}
@Test
@@ -261,13 +244,12 @@ public ManagedKeyData getActiveKey(byte[] key_cust, String keyNamespace)
}
protected boolean assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState,
- Key expectedKey) {
+ Key expectedKey) {
assertNotNull(keyData);
assertEquals(expKeyState, keyData.getKeyState());
if (expectedKey == null) {
assertNull(keyData.getTheKey());
- }
- else {
+ } else {
byte[] keyBytes = keyData.getTheKey().getEncoded();
byte[] expectedKeyBytes = expectedKey.getEncoded();
assertEquals(expectedKeyBytes.length, keyBytes.length);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
index d7045b245616..1c4ad60a8da1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
@@ -36,7 +36,6 @@
import java.util.List;
import java.util.stream.IntStream;
import javax.crypto.spec.SecretKeySpec;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -71,13 +70,12 @@
import org.mockito.MockitoAnnotations;
@RunWith(Suite.class)
-@Suite.SuiteClasses({
- TestSystemKeyAccessorAndManager.TestAccessorWhenDisabled.class,
+@Suite.SuiteClasses({ TestSystemKeyAccessorAndManager.TestAccessorWhenDisabled.class,
TestSystemKeyAccessorAndManager.TestManagerWhenDisabled.class,
TestSystemKeyAccessorAndManager.TestAccessor.class,
TestSystemKeyAccessorAndManager.TestForInvalidFilenames.class,
TestSystemKeyAccessorAndManager.TestManagerForErrors.class,
- TestSystemKeyAccessorAndManager.TestAccessorMisc.class // ADD THIS
+ TestSystemKeyAccessorAndManager.TestAccessorMisc.class
})
@Category({ MasterTests.class, SmallTests.class })
public class TestSystemKeyAccessorAndManager {
@@ -118,15 +116,18 @@ private static FileStatus createMockFile(String fileName) {
@RunWith(BlockJUnit4ClassRunner.class)
@Category({ MasterTests.class, SmallTests.class })
public static class TestAccessorWhenDisabled extends TestSystemKeyAccessorAndManager {
- @ClassRule public static final HBaseClassTestRule CLASS_RULE =
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestAccessorWhenDisabled.class);
- @Override public void setUp() throws Exception {
+ @Override
+ public void setUp() throws Exception {
super.setUp();
conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
}
- @Test public void test() throws Exception {
+ @Test
+ public void test() throws Exception {
assertNull(systemKeyManager.getAllSystemKeyFiles());
assertNull(systemKeyManager.getLatestSystemKeyFile().getFirst());
}
@@ -135,15 +136,18 @@ public static class TestAccessorWhenDisabled extends TestSystemKeyAccessorAndMan
@RunWith(BlockJUnit4ClassRunner.class)
@Category({ MasterTests.class, SmallTests.class })
public static class TestManagerWhenDisabled extends TestSystemKeyAccessorAndManager {
- @ClassRule public static final HBaseClassTestRule CLASS_RULE =
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestManagerWhenDisabled.class);
- @Override public void setUp() throws Exception {
+ @Override
+ public void setUp() throws Exception {
super.setUp();
conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false");
}
- @Test public void test() throws Exception {
+ @Test
+ public void test() throws Exception {
systemKeyManager.ensureSystemKeyInitialized();
assertNull(systemKeyManager.rotateSystemKeyIfChanged());
}
@@ -171,7 +175,7 @@ public void testGetWithSingle() throws Exception {
FileStatus mockFileStatus = createMockFile(fileName);
Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
- when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX+"*"))))
+ when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))))
.thenReturn(new FileStatus[] { mockFileStatus });
List<Path> files = systemKeyManager.getAllSystemKeyFiles();
@@ -181,18 +185,17 @@ public void testGetWithSingle() throws Exception {
Pair<Path, List<Path>> latestSystemKeyFileResult = systemKeyManager.getLatestSystemKeyFile();
assertEquals(fileName, latestSystemKeyFileResult.getFirst().getName());
- assertEquals(1, SystemKeyAccessor.extractSystemKeySeqNum(
- latestSystemKeyFileResult.getFirst()));
+ assertEquals(1,
+ SystemKeyAccessor.extractSystemKeySeqNum(latestSystemKeyFileResult.getFirst()));
}
@Test
public void testGetWithMultiple() throws Exception {
FileStatus[] mockFileStatuses = IntStream.rangeClosed(1, 3)
- .mapToObj(i -> createMockFile(SYSTEM_KEY_FILE_PREFIX + i))
- .toArray(FileStatus[]::new);
+ .mapToObj(i -> createMockFile(SYSTEM_KEY_FILE_PREFIX + i)).toArray(FileStatus[]::new);
Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
- when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX+"*"))))
+ when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))))
.thenReturn(mockFileStatuses);
List<Path> files = systemKeyManager.getAllSystemKeyFiles();
@@ -205,8 +208,7 @@ public void testGetWithMultiple() throws Exception {
@Test
public void testExtractKeySequenceForInvalidFilename() throws Exception {
- assertEquals(-1, SystemKeyAccessor.extractKeySequence(
- createMockFile("abcd").getPath()));
+ assertEquals(-1, SystemKeyAccessor.extractKeySequence(createMockFile("abcd").getPath()));
}
}
@@ -224,11 +226,10 @@ public static class TestForInvalidFilenames extends TestSystemKeyAccessorAndMana
@Parameters(name = "{index},fileName={0}")
public static Collection<Object[]> data() {
- return Arrays.asList(new Object[][] {
- { "abcd", "Couldn't parse key file name: abcd" },
- {SYSTEM_KEY_FILE_PREFIX+"abcd", "Couldn't parse key file name: "+
- SYSTEM_KEY_FILE_PREFIX+"abcd"},
- // Add more test cases here
+ return Arrays.asList(new Object[][] { { "abcd", "Couldn't parse key file name: abcd" },
+ { SYSTEM_KEY_FILE_PREFIX + "abcd",
+ "Couldn't parse key file name: " + SYSTEM_KEY_FILE_PREFIX + "abcd" },
+ // Add more test cases here
});
}
@@ -296,8 +297,9 @@ public void testEnsureSystemKeyInitialized_WithNoNonActiveKey() throws Exception
when(mockKeyProvide.getSystemKey(any())).thenReturn(keyData);
IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized);
- assertEquals("System key is expected to be ACTIVE but it is: INACTIVE for metadata: "
- + metadata, ex.getMessage());
+ assertEquals(
+ "System key is expected to be ACTIVE but it is: INACTIVE for metadata: " + metadata,
+ ex.getMessage());
}
@Test
@@ -324,8 +326,8 @@ public void testEnsureSystemKeyInitialized_WithSaveFailure() throws Exception {
when(mockFileSystem.create(any())).thenReturn(mockStream);
when(mockFileSystem.rename(any(), any())).thenReturn(false);
- RuntimeException ex = assertThrows(RuntimeException.class,
- manager::ensureSystemKeyInitialized);
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, manager::ensureSystemKeyInitialized);
assertEquals("Failed to generate or save System Key", ex.getMessage());
}
@@ -344,10 +346,8 @@ public void testEnsureSystemKeyInitialized_RaceCondition() throws Exception {
when(mockFileSystem.rename(any(), any())).thenReturn(false);
String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
FileStatus mockFileStatus = createMockFile(fileName);
- when(mockFileSystem.globStatus(any())).thenReturn(
- new FileStatus[0],
- new FileStatus[] { mockFileStatus }
- );
+ when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0],
+ new FileStatus[] { mockFileStatus });
manager.ensureSystemKeyInitialized();
}
@@ -367,8 +367,7 @@ public void testLoadSystemKeySuccess() throws Exception {
// Create test key data
Key testKey = new SecretKeySpec("test-key-bytes".getBytes(), "AES");
- ManagedKeyData testKeyData = new ManagedKeyData(
- "custodian".getBytes(), "namespace", testKey,
+ ManagedKeyData testKeyData = new ManagedKeyData("custodian".getBytes(), "namespace", testKey,
ManagedKeyState.ACTIVE, testMetadata, 1000L);
// Mock key provider
@@ -432,8 +431,6 @@ public void testExtractSystemKeySeqNumValid() throws Exception {
assertEquals(Integer.MAX_VALUE, SystemKeyAccessor.extractSystemKeySeqNum(testPathMax));
}
-
-
@Test(expected = IOException.class)
public void testGetAllSystemKeyFilesIOException() throws Exception {
when(mockFileSystem.globStatus(any())).thenThrow(new IOException("Filesystem error"));
@@ -507,10 +504,10 @@ private static class MockSystemKeyManager extends SystemKeyManager {
private final ManagedKeyProvider keyProvider;
public MockSystemKeyManager(MasterServices master, ManagedKeyProvider keyProvider)
- throws IOException {
+ throws IOException {
super(master);
this.keyProvider = keyProvider;
- //systemKeyDir = mock(Path.class);
+ // systemKeyDir = mock(Path.class);
}
@Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
index a764a5b7de87..e73c181a74fd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
@@ -24,7 +24,6 @@
import java.io.IOException;
import java.security.Key;
-
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyProvider;
@@ -104,8 +103,7 @@ private ManagedKeyData validateInitialState(HMaster master, MockManagedKeyProvid
assertNotNull(systemKeyCache);
ManagedKeyData clusterKey = systemKeyCache.getLatestSystemKey();
assertEquals(pbeKeyProvider.getSystemKey(master.getClusterId().getBytes()), clusterKey);
- assertEquals(clusterKey,
- systemKeyCache.getSystemKeyByChecksum(clusterKey.getKeyChecksum()));
+ assertEquals(clusterKey, systemKeyCache.getSystemKeyByChecksum(clusterKey.getKeyChecksum()));
return clusterKey;
}
@@ -113,7 +111,6 @@ private void restartSystem() throws Exception {
TEST_UTIL.shutdownMiniHBaseCluster();
Thread.sleep(2000);
TEST_UTIL.restartHBaseCluster(1);
- TEST_UTIL.waitFor(60000,
- () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
+ TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index ae507f32fd58..a5e3dd1a5b83 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -218,15 +218,18 @@ public Connection getConnection() {
}
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
index 18b7744e17cb..790435f6a47e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
@@ -61,15 +61,18 @@ public ServerName getServerName() {
return serverName;
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 6ed289ab96d1..22be21811950 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -840,15 +840,18 @@ public ChoreService getChoreService() {
return null;
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index adc420409527..5d62d6a908c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -353,15 +353,18 @@ public ChoreService getChoreService() {
return null;
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 9257b78d6ce7..98283db19146 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -103,15 +103,18 @@ public ChoreService getChoreService() {
throw new UnsupportedOperationException();
}
- @Override public SystemKeyCache getSystemKeyCache() {
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
return null;
}
- @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
return null;
}
- @Override public KeymetaAdmin getKeymetaAdmin() {
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
return null;
}
From 4d4d167f2309e4df355f3da23e1d5c67a27695b3 Mon Sep 17 00:00:00 2001
From: Hari Krishna Dara
Date: Wed, 24 Sep 2025 09:39:30 +0530
Subject: [PATCH 4/9] HBASE-29495: Integrate key management with existing
encryption (#7297)
---
.gitignore | 1 +
.../hadoop/hbase/security/EncryptionUtil.java | 52 --
.../org/apache/hadoop/hbase/HConstants.java | 6 +
.../hadoop/hbase/io/crypto/Context.java | 20 +
.../hadoop/hbase/io/crypto/Encryption.java | 6 -
.../hbase/io/crypto/ManagedKeyData.java | 8 +-
.../hbase/io/crypto/MockAesKeyProvider.java | 16 +-
.../hadoop/hbase/util/CommonFSUtils.java | 35 +-
.../hbase/io/crypto/KeymetaTestUtils.java | 198 +++++
.../io/crypto/TestKeyStoreKeyProvider.java | 56 +-
.../io/crypto/TestManagedKeyProvider.java | 125 ++-
.../src/main/protobuf/server/io/HFile.proto | 3 +
.../apache/hadoop/hbase/HBaseServerBase.java | 8 +-
.../hbase/MockRegionServerServices.java | 6 +
.../java/org/apache/hadoop/hbase/Server.java | 16 +-
.../hbase/client/ClientSideRegionScanner.java | 2 +-
.../org/apache/hadoop/hbase/io/HFileLink.java | 9 +
.../hbase/io/hfile/FixedFileTrailer.java | 60 ++
.../apache/hadoop/hbase/io/hfile/HFile.java | 14 +-
.../hadoop/hbase/io/hfile/HFileInfo.java | 37 +-
.../hbase/io/hfile/HFileWriterImpl.java | 31 +-
.../hadoop/hbase/io/hfile/ReaderContext.java | 17 +-
.../hbase/io/hfile/ReaderContextBuilder.java | 19 +-
.../hbase/keymeta/KeyManagementBase.java | 16 +-
.../hbase/keymeta/KeyManagementService.java | 78 ++
.../hbase/keymeta/KeyNamespaceUtil.java | 90 +++
.../hbase/keymeta/KeymetaServiceEndpoint.java | 10 +-
.../hbase/keymeta/KeymetaTableAccessor.java | 42 +-
.../hbase/keymeta/SystemKeyAccessor.java | 29 +-
.../hadoop/hbase/keymeta/SystemKeyCache.java | 15 +
.../hadoop/hbase/master/MasterServices.java | 3 +-
.../hadoop/hbase/master/SystemKeyManager.java | 6 +-
.../procedure/CreateTableProcedure.java | 3 +-
.../master/procedure/InitMetaProcedure.java | 13 +-
.../hbase/master/region/MasterRegion.java | 42 +-
.../master/region/MasterRegionFactory.java | 4 +-
.../master/region/MasterRegionParams.java | 8 +-
.../hadoop/hbase/regionserver/HRegion.java | 290 +++++--
.../hadoop/hbase/regionserver/HStore.java | 7 +-
.../hadoop/hbase/regionserver/HStoreFile.java | 42 +-
.../regionserver/RegionServerServices.java | 4 +-
.../regionserver/RegionServicesForStores.java | 10 +
.../hbase/regionserver/StoreEngine.java | 12 +-
.../hbase/regionserver/StoreFileInfo.java | 14 +-
.../storefiletracker/StoreFileTracker.java | 7 +
.../StoreFileTrackerBase.java | 5 +
.../regionserver/ReplicationSyncUp.java | 24 +-
.../hadoop/hbase/security/SecurityUtil.java | 189 ++++-
.../hadoop/hbase/util/ModifyRegionUtils.java | 30 +-
.../hbase/keymeta/ManagedKeyTestBase.java | 7 +-
.../hbase/keymeta/TestKeyManagementBase.java | 25 +-
.../keymeta/TestKeyManagementService.java | 102 +++
.../hbase/keymeta/TestKeyNamespaceUtil.java | 126 +++
.../hbase/keymeta/TestKeymetaEndpoint.java | 10 +-
.../keymeta/TestKeymetaTableAccessor.java | 5 +-
.../keymeta/TestManagedKeyDataCache.java | 4 +-
.../hbase/keymeta/TestSystemKeyCache.java | 3 +-
.../master/MasterStateStoreTestBase.java | 3 +-
.../hbase/master/MockNoopMasterServices.java | 7 +-
.../hadoop/hbase/master/MockRegionServer.java | 6 +
.../hbase/master/TestActiveMasterManager.java | 16 +-
.../hbase/master/TestKeymetaAdminImpl.java | 9 +-
.../TestSystemKeyAccessorAndManager.java | 39 +-
.../cleaner/TestReplicationHFileCleaner.java | 24 +-
.../master/region/MasterRegionTestBase.java | 6 +-
.../TestMasterRegionOnTwoFileSystems.java | 4 +-
...onProcedureStorePerformanceEvaluation.java | 31 +-
.../region/RegionProcedureStoreTestBase.java | 4 +-
.../RegionProcedureStoreTestHelper.java | 5 +-
.../TestRegionProcedureStoreMigration.java | 4 +-
.../regionserver/TestHeapMemoryManager.java | 24 +-
.../TestRecoveredEditsReplayAndAbort.java | 3 +-
.../hbase/regionserver/TestStoreFileInfo.java | 3 +-
.../hbase/security/TestSecurityUtil.java | 751 ++++++++++++++++++
.../token/TestTokenAuthentication.java | 20 +-
.../snapshot/TestRestoreSnapshotHelper.java | 4 +-
.../apache/hadoop/hbase/util/MockServer.java | 24 +-
hbase-shell/pom.xml | 6 +
hbase-shell/src/main/ruby/hbase/hbase.rb | 26 +-
.../shell/commands/keymeta_command_base.rb | 2 +-
.../hbase/client/TestKeymetaAdminShell.java | 127 +++
.../src/test/ruby/shell/admin_keymeta_test.rb | 61 ++
.../shell/encrypted_table_keymeta_test.rb | 143 ++++
83 files changed, 2787 insertions(+), 585 deletions(-)
create mode 100644 hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementService.java
create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyNamespaceUtil.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java
create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java
create mode 100644 hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java
create mode 100644 hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
create mode 100644 hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
diff --git a/.gitignore b/.gitignore
index fc93b1447ba1..274a0740c85e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@ linklint/
**/*.log
tmp
**/.flattened-pom.xml
+.sw*
.*.sw*
ID
filenametags
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index b06ca9ce0d1e..05a1a4b0b66b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -27,7 +27,6 @@
import org.apache.commons.crypto.cipher.CryptoCipherFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES;
@@ -219,57 +218,6 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value)
return getUnwrapKey(conf, subject, wrappedKey, cipher, null);
}
- /**
- * Helper to create an encyption context.
- * @param conf The current configuration.
- * @param family The current column descriptor.
- * @return The created encryption context.
- * @throws IOException if an encryption key for the column cannot be unwrapped
- * @throws IllegalStateException in case of encryption related configuration errors
- */
- public static Encryption.Context createEncryptionContext(Configuration conf,
- ColumnFamilyDescriptor family) throws IOException {
- Encryption.Context cryptoContext = Encryption.Context.NONE;
- String cipherName = family.getEncryptionType();
- if (cipherName != null) {
- if (!Encryption.isEncryptionEnabled(conf)) {
- throw new IllegalStateException("Encryption for family '" + family.getNameAsString()
- + "' configured with type '" + cipherName + "' but the encryption feature is disabled");
- }
- Cipher cipher;
- Key key;
- byte[] keyBytes = family.getEncryptionKey();
- if (keyBytes != null) {
- // Family provides specific key material
- key = unwrapKey(conf, keyBytes);
- // Use the algorithm the key wants
- cipher = Encryption.getCipher(conf, key.getAlgorithm());
- if (cipher == null) {
- throw new IllegalStateException("Cipher '" + key.getAlgorithm() + "' is not available");
- }
- // Fail if misconfigured
- // We use the encryption type specified in the column schema as a sanity check on
- // what the wrapped key is telling us
- if (!cipher.getName().equalsIgnoreCase(cipherName)) {
- throw new IllegalStateException(
- "Encryption for family '" + family.getNameAsString() + "' configured with type '"
- + cipherName + "' but key specifies algorithm '" + cipher.getName() + "'");
- }
- } else {
- // Family does not provide key material, create a random key
- cipher = Encryption.getCipher(conf, cipherName);
- if (cipher == null) {
- throw new IllegalStateException("Cipher '" + cipherName + "' is not available");
- }
- key = cipher.getRandomKey();
- }
- cryptoContext = Encryption.newContext(conf);
- cryptoContext.setCipher(cipher);
- cryptoContext.setKey(key);
- }
- return cryptoContext;
- }
-
/**
* Helper for {@link #unwrapKey(Configuration, String, byte[])} which automatically uses the
* configured master and alternative keys, rather than having to specify a key type to unwrap
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index b9dfa9afc5d8..2dca4f7e452d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -343,6 +343,7 @@ public enum OperationStatusCode {
/** Parameter name for HBase instance root directory */
public static final String HBASE_DIR = "hbase.rootdir";
+ public static final String HBASE_ORIGINAL_DIR = "hbase.originalRootdir";
/** Parameter name for HBase client IPC pool type */
public static final String HBASE_CLIENT_IPC_POOL_TYPE = "hbase.client.ipc.pool.type";
@@ -1342,6 +1343,11 @@ public enum OperationStatusCode {
"hbase.crypto.managed_keys.l1_active_cache.max_ns_entries";
public static final int CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT = 100;
+ /** Enables or disables local key generation per file. */
+ public static final String CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY =
+ "hbase.crypto.managed_keys.local_key_gen_per_file.enabled";
+ public static final boolean CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_DEFAULT_ENABLED = false;
+
/** Configuration key for setting RPC codec class name */
public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec";
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java
index ce32351fecdf..7e816b917628 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Context.java
@@ -34,6 +34,8 @@ public class Context implements Configurable {
private Configuration conf;
private Cipher cipher;
private Key key;
+ private ManagedKeyData kekData;
+ private String keyNamespace;
private String keyHash;
Context(Configuration conf) {
@@ -97,4 +99,22 @@ public Context setKey(Key key) {
this.keyHash = new String(Hex.encodeHex(Encryption.computeCryptoKeyHash(conf, encoded)));
return this;
}
+
+ public Context setKeyNamespace(String keyNamespace) {
+ this.keyNamespace = keyNamespace;
+ return this;
+ }
+
+ public String getKeyNamespace() {
+ return keyNamespace;
+ }
+
+ public Context setKEKData(ManagedKeyData kekData) {
+ this.kekData = kekData;
+ return this;
+ }
+
+ public ManagedKeyData getKEKData() {
+ return kekData;
+ }
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index a176a4329422..91af77361a0e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -117,12 +117,6 @@ public Context setCipher(Cipher cipher) {
return this;
}
- @Override
- public Context setKey(Key key) {
- super.setKey(key);
- return this;
- }
-
public Context setKey(byte[] key) {
super.setKey(new SecretKeySpec(key, getCipher().getName()));
return this;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
index e9c00935d38e..ffd5dbb7b574 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
@@ -54,11 +54,17 @@ public class ManagedKeyData {
*/
public static final String KEY_SPACE_GLOBAL = "*";
+ /**
+ * Special custodian value indicating a global key, i.e. one that is not associated with any
+ * specific custodian.
+ */
+ public static final byte[] KEY_GLOBAL_CUSTODIAN_BYTES = KEY_SPACE_GLOBAL.getBytes();
+
/**
* Encoded form of global custodian.
*/
public static final String KEY_GLOBAL_CUSTODIAN =
- ManagedKeyProvider.encodeToStr(KEY_SPACE_GLOBAL.getBytes());
+ ManagedKeyProvider.encodeToStr(KEY_GLOBAL_CUSTODIAN_BYTES);
private final byte[] keyCustodian;
private final String keyNamespace;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java
index 0bb2aef7d99b..39f460e062ae 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.hbase.io.crypto;
import java.security.Key;
+import java.util.HashMap;
+import java.util.Map;
import javax.crypto.spec.SecretKeySpec;
import org.apache.yetus.audience.InterfaceAudience;
@@ -27,8 +29,13 @@
@InterfaceAudience.Private
public class MockAesKeyProvider implements KeyProvider {
+ private Map<String, Key> keys = new HashMap<>();
+
+ private boolean cacheKeys = false;
+
@Override
public void init(String parameters) {
+ cacheKeys = Boolean.parseBoolean(parameters);
}
@Override
@@ -40,7 +47,14 @@ public Key getKey(String name) {
public Key[] getKeys(String[] aliases) {
Key[] result = new Key[aliases.length];
for (int i = 0; i < aliases.length; i++) {
- result[i] = new SecretKeySpec(Encryption.hash128(aliases[i]), "AES");
+ if (keys.containsKey(aliases[i])) {
+ result[i] = keys.get(aliases[i]);
+ } else {
+ result[i] = new SecretKeySpec(Encryption.hash128(aliases[i]), "AES");
+ if (cacheKeys) {
+ keys.put(aliases[i], result[i]);
+ }
+ }
}
return result;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index da4662d2c8a0..d79cb6f38873 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -288,17 +288,48 @@ public static String getPath(Path p) {
* @throws IOException e
*/
public static Path getRootDir(final Configuration c) throws IOException {
- Path p = new Path(c.get(HConstants.HBASE_DIR));
+ return getRootDir(c, HConstants.HBASE_DIR);
+ }
+
+ /**
+ * Get the path for the original root data directory, which may differ from the current root
+ * directory if it was later changed via {@link #setRootDir(Configuration, Path)}.
+ * @param c configuration
+ * @return {@link Path} to hbase original root directory from configuration as a qualified Path.
+ * @throws IOException e
+ */
+ public static Path getOriginalRootDir(final Configuration c) throws IOException {
+ return getRootDir(c,
+ c.get(HConstants.HBASE_ORIGINAL_DIR) == null
+ ? HConstants.HBASE_DIR
+ : HConstants.HBASE_ORIGINAL_DIR);
+ }
+
+ /**
+ * Get the path for the root data directory
+ * @param c configuration
+ * @param rootDirProp the property name for the root directory
+ * @return {@link Path} to hbase root directory from configuration as a qualified Path.
+ * @throws IOException e
+ */
+ public static Path getRootDir(final Configuration c, final String rootDirProp)
+ throws IOException {
+ Path p = new Path(c.get(rootDirProp));
FileSystem fs = p.getFileSystem(c);
return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
public static void setRootDir(final Configuration c, final Path root) {
+ // Keep track of the original root dir.
+ if (c.get(HConstants.HBASE_ORIGINAL_DIR) == null && c.get(HConstants.HBASE_DIR) != null) {
+ c.set(HConstants.HBASE_ORIGINAL_DIR, c.get(HConstants.HBASE_DIR));
+ }
c.set(HConstants.HBASE_DIR, root.toString());
}
public static Path getSystemKeyDir(final Configuration c) throws IOException {
- return new Path(getRootDir(c), HConstants.SYSTEM_KEYS_DIRECTORY);
+ // Always use the original root dir for the system key dir, in case it was changed later.
+ return new Path(getOriginalRootDir(c), HConstants.SYSTEM_KEYS_DIRECTORY);
}
public static void setFsDefault(final Configuration c, final Path root) {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java
new file mode 100644
index 000000000000..3a8fb3d32464
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.net.URLEncoder;
+import java.security.KeyStore;
+import java.security.MessageDigest;
+import java.util.Base64;
+import java.util.Map;
+import java.util.Properties;
+import java.util.function.Function;
+import javax.crypto.spec.SecretKeySpec;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+public class KeymetaTestUtils {
+
+ /**
+ * A ByteArrayInputStream that implements Seekable and PositionedReadable to work with
+ * FSDataInputStream.
+ */
+ public static class SeekableByteArrayInputStream extends ByteArrayInputStream
+ implements Seekable, PositionedReadable {
+
+ public SeekableByteArrayInputStream(byte[] buf) {
+ super(buf);
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ if (pos < this.mark || pos > buf.length) {
+ throw new IOException("Seek position out of bounds: " + pos);
+ }
+ this.pos = (int) pos;
+ this.mark = (int) pos;
+ }
+
+ @Override
+ public long getPos() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public boolean seekToNewSource(long targetPos) throws IOException {
+ return false; // No alternate sources
+ }
+
+ @Override
+ public int read(long position, byte[] buffer, int offset, int length) throws IOException {
+ if (position < 0 || position >= buf.length) {
+ return -1;
+ }
+ int currentPos = pos;
+ seek(position);
+ int bytesRead = read(buffer, offset, length);
+ pos = currentPos; // Restore original position
+ return bytesRead;
+ }
+
+ @Override
+ public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
+ int totalBytesRead = 0;
+ while (totalBytesRead < length) {
+ int bytesRead =
+ read(position + totalBytesRead, buffer, offset + totalBytesRead, length - totalBytesRead);
+ if (bytesRead == -1) {
+ throw new IOException("Reached end of stream before reading fully");
+ }
+ totalBytesRead += bytesRead;
+ }
+ }
+
+ @Override
+ public void readFully(long position, byte[] buffer) throws IOException {
+ readFully(position, buffer, 0, buffer.length);
+ }
+ }
+
+ private KeymetaTestUtils() {
+ // Utility class
+ }
+
+ public static final String ALIAS = "test";
+ public static final String PASSWORD = "password";
+
+ public static void addEntry(Configuration conf, int keyLen, KeyStore store, String alias,
+ String custodian, boolean withPasswordOnAlias, Map<Bytes, Bytes> cust2key,
+ Map<Bytes, String> cust2alias, Properties passwordFileProps) throws Exception {
+ Preconditions.checkArgument(keyLen == 256 || keyLen == 128, "Key length must be 256 or 128");
+ byte[] key =
+ MessageDigest.getInstance(keyLen == 256 ? "SHA-256" : "MD5").digest(Bytes.toBytes(alias));
+ cust2alias.put(new Bytes(custodian.getBytes()), alias);
+ cust2key.put(new Bytes(custodian.getBytes()), new Bytes(key));
+ store.setEntry(alias, new KeyStore.SecretKeyEntry(new SecretKeySpec(key, "AES")),
+ new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
+ String encCust = Base64.getEncoder().encodeToString(custodian.getBytes());
+ String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + "." + "alias";
+ conf.set(confKey, alias);
+ if (passwordFileProps != null) {
+ passwordFileProps.setProperty(alias, PASSWORD);
+ }
+ }
+
+ public static String setupTestKeyStore(HBaseCommonTestingUtil testUtil,
+ boolean withPasswordOnAlias, boolean withPasswordFile,
+ Function<KeyStore, Properties> customEntriesAdder) throws Exception {
+ KeyStore store = KeyStore.getInstance("JCEKS");
+ store.load(null, PASSWORD.toCharArray());
+ Properties passwordProps = null;
+ if (customEntriesAdder != null) {
+ passwordProps = customEntriesAdder.apply(store);
+ }
+ // Create the test directory
+ String dataDir = testUtil.getDataTestDir().toString();
+ new File(dataDir).mkdirs();
+ // Write the keystore file
+ File storeFile = new File(dataDir, "keystore.jks");
+ FileOutputStream os = new FileOutputStream(storeFile);
+ try {
+ store.store(os, PASSWORD.toCharArray());
+ } finally {
+ os.close();
+ }
+ File passwordFile = null;
+ if (withPasswordFile) {
+ passwordFile = new File(dataDir, "keystore.pw");
+ os = new FileOutputStream(passwordFile);
+ try {
+ passwordProps.store(os, "");
+ } finally {
+ os.close();
+ }
+ }
+ String providerParams;
+ if (withPasswordFile) {
+ providerParams = "jceks://" + storeFile.toURI().getPath() + "?passwordFile="
+ + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8");
+ } else {
+ providerParams = "jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD;
+ }
+ return providerParams;
+ }
+
+ public static FileStatus createMockFile(String fileName) {
+ Path mockPath = mock(Path.class);
+ when(mockPath.getName()).thenReturn(fileName);
+ FileStatus mockFileStatus = mock(FileStatus.class);
+ when(mockFileStatus.getPath()).thenReturn(mockPath);
+ return mockFileStatus;
+ }
+
+ public static Path createMockPath(String tableName, String family) {
+ Path mockPath = mock(Path.class);
+ Path mockRegionDir = mock(Path.class);
+ Path mockTableDir = mock(Path.class);
+ Path mockNamespaceDir = mock(Path.class);
+ Path mockFamilyDir = mock(Path.class);
+ Path mockDataDir = mock(Path.class);
+ when(mockPath.getParent()).thenReturn(mockFamilyDir);
+ when(mockFamilyDir.getParent()).thenReturn(mockRegionDir);
+ when(mockRegionDir.getParent()).thenReturn(mockTableDir);
+ when(mockTableDir.getParent()).thenReturn(mockNamespaceDir);
+ when(mockNamespaceDir.getParent()).thenReturn(mockDataDir);
+ when(mockTableDir.getName()).thenReturn(tableName);
+ when(mockFamilyDir.getName()).thenReturn(family);
+ return mockPath;
+ }
+}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
index a0304e6337fb..bb19d4222001 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
@@ -17,12 +17,11 @@
*/
package org.apache.hadoop.hbase.io.crypto;
+import static org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils.ALIAS;
+import static org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils.PASSWORD;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.net.URLEncoder;
import java.security.Key;
import java.security.KeyStore;
import java.security.MessageDigest;
@@ -51,12 +50,8 @@ public class TestKeyStoreKeyProvider {
HBaseClassTestRule.forClass(TestKeyStoreKeyProvider.class);
static final HBaseCommonTestingUtil TEST_UTIL = new HBaseCommonTestingUtil();
- static final String ALIAS = "test";
- static final String PASSWORD = "password";
static byte[] KEY;
- static File storeFile;
- static File passwordFile;
protected KeyProvider provider;
@@ -75,40 +70,21 @@ public static Collection<Object[]> parameters() {
@Before
public void setUp() throws Exception {
KEY = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(ALIAS));
- // Create a JKECS store containing a test secret key
- KeyStore store = KeyStore.getInstance("JCEKS");
- store.load(null, PASSWORD.toCharArray());
- store.setEntry(ALIAS, new KeyStore.SecretKeyEntry(new SecretKeySpec(KEY, "AES")),
- new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
- Properties p = new Properties();
- addCustomEntries(store, p);
- // Create the test directory
- String dataDir = TEST_UTIL.getDataTestDir().toString();
- new File(dataDir).mkdirs();
- // Write the keystore file
- storeFile = new File(dataDir, "keystore.jks");
- FileOutputStream os = new FileOutputStream(storeFile);
- try {
- store.store(os, PASSWORD.toCharArray());
- } finally {
- os.close();
- }
- // Write the password file
- passwordFile = new File(dataDir, "keystore.pw");
- os = new FileOutputStream(passwordFile);
- try {
- p.store(os, "");
- } finally {
- os.close();
- }
-
+ String providerParams = KeymetaTestUtils.setupTestKeyStore(TEST_UTIL, withPasswordOnAlias,
+ withPasswordFile, store -> {
+ Properties p = new Properties();
+ try {
+ store.setEntry(ALIAS, new KeyStore.SecretKeyEntry(new SecretKeySpec(KEY, "AES")),
+ new KeyStore.PasswordProtection(
+ withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
+ addCustomEntries(store, p);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return p;
+ });
provider = createProvider();
- if (withPasswordFile) {
- provider.init("jceks://" + storeFile.toURI().getPath() + "?passwordFile="
- + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8"));
- } else {
- provider.init("jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD);
- }
+ provider.init(providerParams);
}
protected KeyProvider createProvider() {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
index 472ce56405a9..405c5731be94 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.io.crypto;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES;
import static org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider.KEY_METADATA_ALIAS;
import static org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider.KEY_METADATA_CUST;
import static org.junit.Assert.assertEquals;
@@ -26,14 +27,12 @@
import static org.junit.Assert.assertTrue;
import java.security.KeyStore;
-import java.security.MessageDigest;
import java.util.Arrays;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
-import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -66,10 +65,10 @@ public static class TestManagedKeyStoreKeyProvider extends TestKeyStoreKeyProvid
private static final String SYSTEM_KEY_ALIAS = "system-alias";
private Configuration conf = HBaseConfiguration.create();
- private int nPrefixes = 2;
+ private int nCustodians = 2;
private ManagedKeyProvider managedKeyProvider;
- private Map<Bytes, Bytes> prefix2key = new HashMap<>();
- private Map<Bytes, String> prefix2alias = new HashMap<>();
+ private Map<Bytes, Bytes> cust2key = new HashMap<>();
+ private Map<Bytes, String> cust2alias = new HashMap<>();
private String clusterId;
private byte[] systemKey;
@@ -86,41 +85,21 @@ protected KeyProvider createProvider() {
protected void addCustomEntries(KeyStore store, Properties passwdProps) throws Exception {
super.addCustomEntries(store, passwdProps);
- for (int i = 0; i < nPrefixes; ++i) {
- String prefix = "prefix+ " + i;
- String alias = prefix + "-alias";
- byte[] key = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(alias));
- prefix2alias.put(new Bytes(prefix.getBytes()), alias);
- prefix2key.put(new Bytes(prefix.getBytes()), new Bytes(key));
- store.setEntry(alias, new KeyStore.SecretKeyEntry(new SecretKeySpec(key, "AES")),
- new KeyStore.PasswordProtection(
- withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
-
- String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes());
- String confKey =
- HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "." + "alias";
- conf.set(confKey, alias);
-
- passwdProps.setProperty(alias, PASSWORD);
-
- clusterId = UUID.randomUUID().toString();
- systemKey = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(SYSTEM_KEY_ALIAS));
- store.setEntry(SYSTEM_KEY_ALIAS,
- new KeyStore.SecretKeyEntry(new SecretKeySpec(systemKey, "AES")),
- new KeyStore.PasswordProtection(
- withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
-
- conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
-
- passwdProps.setProperty(SYSTEM_KEY_ALIAS, PASSWORD);
+ for (int i = 0; i < nCustodians; ++i) {
+ String custodian = "custodian+ " + i;
+ String alias = custodian + "-alias";
+ KeymetaTestUtils.addEntry(conf, 256, store, alias, custodian, withPasswordOnAlias, cust2key,
+ cust2alias, passwdProps);
}
- }
- private void addEntry(String alias, String prefix) {
- String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes());
- String confKey =
- HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "." + "alias";
- conf.set(confKey, alias);
+ clusterId = UUID.randomUUID().toString();
+ KeymetaTestUtils.addEntry(conf, 256, store, SYSTEM_KEY_ALIAS, clusterId, withPasswordOnAlias,
+ cust2key, cust2alias, passwdProps);
+ systemKey = cust2key.get(new Bytes(clusterId.getBytes())).get();
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
+
+ KeymetaTestUtils.addEntry(conf, 256, store, "global-cust-alias", "*", withPasswordOnAlias,
+ cust2key, cust2alias, passwdProps);
}
@Test
@@ -133,46 +112,54 @@ public void testMissingConfig() throws Exception {
@Test
public void testGetManagedKey() throws Exception {
- for (Bytes prefix : prefix2key.keySet()) {
+ for (Bytes cust : cust2key.keySet()) {
ManagedKeyData keyData =
- managedKeyProvider.getManagedKey(prefix.get(), ManagedKeyData.KEY_SPACE_GLOBAL);
- assertKeyData(keyData, ManagedKeyState.ACTIVE, prefix2key.get(prefix).get(), prefix.get(),
- prefix2alias.get(prefix));
+ managedKeyProvider.getManagedKey(cust.get(), ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyData(keyData, ManagedKeyState.ACTIVE, cust2key.get(cust).get(), cust.get(),
+ cust2alias.get(cust));
}
}
+ @Test
+ public void testGetGlobalCustodianKey() throws Exception {
+ byte[] globalCustodianKey = cust2key.get(new Bytes(KEY_GLOBAL_CUSTODIAN_BYTES)).get();
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(KEY_GLOBAL_CUSTODIAN_BYTES,
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ assertKeyData(keyData, ManagedKeyState.ACTIVE, globalCustodianKey, KEY_GLOBAL_CUSTODIAN_BYTES,
+ "global-cust-alias");
+ }
+
@Test
public void testGetInactiveKey() throws Exception {
- Bytes firstPrefix = prefix2key.keySet().iterator().next();
- String encPrefix = Base64.getEncoder().encodeToString(firstPrefix.get());
- conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + ".active",
- "false");
+ Bytes firstCust = cust2key.keySet().iterator().next();
+ String encCust = Base64.getEncoder().encodeToString(firstCust.get());
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + ".active", "false");
ManagedKeyData keyData =
- managedKeyProvider.getManagedKey(firstPrefix.get(), ManagedKeyData.KEY_SPACE_GLOBAL);
+ managedKeyProvider.getManagedKey(firstCust.get(), ManagedKeyData.KEY_SPACE_GLOBAL);
assertNotNull(keyData);
- assertKeyData(keyData, ManagedKeyState.INACTIVE, prefix2key.get(firstPrefix).get(),
- firstPrefix.get(), prefix2alias.get(firstPrefix));
+ assertKeyData(keyData, ManagedKeyState.INACTIVE, cust2key.get(firstCust).get(),
+ firstCust.get(), cust2alias.get(firstCust));
}
@Test
public void testGetInvalidKey() throws Exception {
- byte[] invalidPrefixBytes = "invalid".getBytes();
+ byte[] invalidCustBytes = "invalid".getBytes();
ManagedKeyData keyData =
- managedKeyProvider.getManagedKey(invalidPrefixBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
+ managedKeyProvider.getManagedKey(invalidCustBytes, ManagedKeyData.KEY_SPACE_GLOBAL);
assertNotNull(keyData);
- assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefixBytes, null);
+ assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidCustBytes, null);
}
@Test
public void testGetDisabledKey() throws Exception {
- byte[] invalidPrefix = new byte[] { 1, 2, 3 };
- String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
- conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active",
+ byte[] invalidCust = new byte[] { 1, 2, 3 };
+ String invalidCustEnc = ManagedKeyProvider.encodeToStr(invalidCust);
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".active",
"false");
ManagedKeyData keyData =
- managedKeyProvider.getManagedKey(invalidPrefix, ManagedKeyData.KEY_SPACE_GLOBAL);
+ managedKeyProvider.getManagedKey(invalidCust, ManagedKeyData.KEY_SPACE_GLOBAL);
assertNotNull(keyData);
- assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidPrefix, null);
+ assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidCust, null);
}
@Test
@@ -192,31 +179,31 @@ public void testGetSystemKey() throws Exception {
@Test
public void testUnwrapInvalidKey() throws Exception {
String invalidAlias = "invalidAlias";
- byte[] invalidPrefix = new byte[] { 1, 2, 3 };
- String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
+ byte[] invalidCust = new byte[] { 1, 2, 3 };
+ String invalidCustEnc = ManagedKeyProvider.encodeToStr(invalidCust);
String invalidMetadata =
- ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidPrefixEnc);
+ ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidCustEnc);
ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null);
assertNotNull(keyData);
- assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefix, invalidAlias);
+ assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidCust, invalidAlias);
}
@Test
public void testUnwrapDisabledKey() throws Exception {
String invalidAlias = "invalidAlias";
- byte[] invalidPrefix = new byte[] { 1, 2, 3 };
- String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix);
- conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active",
+ byte[] invalidCust = new byte[] { 1, 2, 3 };
+ String invalidCustEnc = ManagedKeyProvider.encodeToStr(invalidCust);
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".active",
"false");
String invalidMetadata =
- ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidPrefixEnc);
+ ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidCustEnc);
ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null);
assertNotNull(keyData);
- assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidPrefix, invalidAlias);
+ assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidCust, invalidAlias);
}
private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState, byte[] key,
- byte[] prefixBytes, String alias) throws Exception {
+ byte[] custBytes, String alias) throws Exception {
assertNotNull(keyData);
assertEquals(expKeyState, keyData.getKeyState());
if (key == null) {
@@ -229,9 +216,9 @@ private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState,
Map keyMetadata =
GsonUtil.getDefaultInstance().fromJson(keyData.getKeyMetadata(), HashMap.class);
assertNotNull(keyMetadata);
- assertEquals(new Bytes(prefixBytes), keyData.getKeyCustodian());
+ assertEquals(new Bytes(custBytes), keyData.getKeyCustodian());
assertEquals(alias, keyMetadata.get(KEY_METADATA_ALIAS));
- assertEquals(Base64.getEncoder().encodeToString(prefixBytes),
+ assertEquals(Base64.getEncoder().encodeToString(custBytes),
keyMetadata.get(KEY_METADATA_CUST));
assertEquals(keyData, managedKeyProvider.unwrapKey(keyData.getKeyMetadata(), null));
}
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/io/HFile.proto b/hbase-protocol-shaded/src/main/protobuf/server/io/HFile.proto
index fd1b9b3680d8..26a343a5d04f 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/io/HFile.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/io/HFile.proto
@@ -51,4 +51,7 @@ message FileTrailerProto {
optional string comparator_class_name = 11;
optional uint32 compression_codec = 12;
optional bytes encryption_key = 13;
+ optional string key_namespace = 14;
+ optional string kek_metadata = 15;
+ optional uint64 kek_checksum = 16;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
index 12cc7433e7be..0993fc0f09da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
import org.apache.hadoop.hbase.keymeta.KeymetaAdminImpl;
import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
@@ -92,7 +93,7 @@
*/
@InterfaceAudience.Private
public abstract class HBaseServerBase<R extends HBaseRpcServicesBase<?>> extends Thread
- implements Server, ConfigurationObserver, ConnectionRegistryEndpoint {
+ implements Server, ConfigurationObserver, ConnectionRegistryEndpoint, KeyManagementService {
private static final Logger LOG = LoggerFactory.getLogger(HBaseServerBase.class);
@@ -661,6 +662,11 @@ public void updateConfiguration() throws IOException {
postUpdateConfiguration();
}
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return this;
+ }
+
private void preUpdateConfiguration() throws IOException {
CoprocessorHost<?, ?> coprocessorHost = getCoprocessorHost();
if (coprocessorHost instanceof RegionServerCoprocessorHost) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 39d09ab170f3..4b5d36382eff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
@@ -404,4 +405,9 @@ public AsyncClusterConnection getAsyncClusterConnection() {
public RegionReplicationBufferManager getRegionReplicationBufferManager() {
return null;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return this;
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index c1a6d7dc9ec8..ba258d14add9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -23,9 +23,7 @@
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
-import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
-import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.yetus.audience.InterfaceAudience;
@@ -86,15 +84,6 @@ default AsyncConnection getAsyncConnection() {
/** Returns The {@link ChoreService} instance for this server */
ChoreService getChoreService();
- /** Returns the cache for cluster keys. */
- public SystemKeyCache getSystemKeyCache();
-
- /** Returns the cache for managed keys. */
- public ManagedKeyDataCache getManagedKeyDataCache();
-
- /** Returns the admin for keymeta. */
- public KeymetaAdmin getKeymetaAdmin();
-
/** Returns Return the FileSystem object used (can return null!). */
// TODO: Distinguish between "dataFs" and "walFs".
default FileSystem getFileSystem() {
@@ -116,4 +105,7 @@ default FileSystem getFileSystem() {
default boolean isStopping() {
return false;
}
+
+ /** Returns the KeyManagementService instance for this server. */
+ KeyManagementService getKeyManagementService();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index df99fd403387..eb7c77554b02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -63,7 +63,7 @@ public ClientSideRegionScanner(Configuration conf, FileSystem fs, Path rootDir,
// open region from the snapshot directory
region = HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, htd.getTableName()), null, fs,
- conf, hri, htd, null);
+ conf, hri, htd, null, null);
region.setRestoredRegion(true);
// non RS process does not have a block cache, and this a client side scanner,
// create one for MapReduce jobs to cache the INDEX block by setting to use
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index bd5fac1c3c45..85201ccd8bdf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -174,6 +174,15 @@ public Path getMobPath() {
return this.mobPath;
}
+ /**
+ * Get the table name and family name from the origin path.
+ * @return the table name and family name
+ */
+ public Pair<String, String> getTableNameAndFamilyName() {
+ return new Pair<>(this.originPath.getParent().getName(),
+ this.originPath.getParent().getParent().getParent().getName());
+ }
+
/**
* @param path Path to check.
* @return True if the path is a HFileLink.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 74b560022a8b..d3337d24712c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -130,6 +130,21 @@ public class FixedFileTrailer {
*/
private byte[] encryptionKey;
+ /**
+ * The key namespace
+ */
+ private String keyNamespace;
+
+ /**
+ * The KEK checksum
+ */
+ private long kekChecksum;
+
+ /**
+ * The KEK metadata
+ */
+ private String kekMetadata;
+
/**
* The {@link HFile} format major version.
*/
@@ -211,6 +226,15 @@ HFileProtos.FileTrailerProto toProtobuf() {
if (encryptionKey != null) {
builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(encryptionKey));
}
+ if (keyNamespace != null) {
+ builder.setKeyNamespace(keyNamespace);
+ }
+ if (kekMetadata != null) {
+ builder.setKekMetadata(kekMetadata);
+ }
+ if (kekChecksum != 0) {
+ builder.setKekChecksum(kekChecksum);
+ }
return builder.build();
}
@@ -313,6 +337,15 @@ void deserializeFromPB(DataInputStream inputStream) throws IOException {
if (trailerProto.hasEncryptionKey()) {
encryptionKey = trailerProto.getEncryptionKey().toByteArray();
}
+ if (trailerProto.hasKeyNamespace()) {
+ keyNamespace = trailerProto.getKeyNamespace();
+ }
+ if (trailerProto.hasKekMetadata()) {
+ kekMetadata = trailerProto.getKekMetadata();
+ }
+ if (trailerProto.hasKekChecksum()) {
+ kekChecksum = trailerProto.getKekChecksum();
+ }
}
/**
@@ -362,6 +395,9 @@ public String toString() {
if (majorVersion >= 3) {
append(sb, "encryptionKey=" + (encryptionKey != null ? "PRESENT" : "NONE"));
}
+ if (keyNamespace != null) {
+ append(sb, "keyNamespace=" + keyNamespace);
+ }
append(sb, "majorVersion=" + majorVersion);
append(sb, "minorVersion=" + minorVersion);
@@ -641,10 +677,34 @@ public byte[] getEncryptionKey() {
return encryptionKey;
}
+ public String getKeyNamespace() {
+ return keyNamespace;
+ }
+
+ public void setKeyNamespace(String keyNamespace) {
+ this.keyNamespace = keyNamespace;
+ }
+
+ public void setKEKChecksum(long kekChecksum) {
+ this.kekChecksum = kekChecksum;
+ }
+
+ public long getKEKChecksum() {
+ return kekChecksum;
+ }
+
public void setEncryptionKey(byte[] keyBytes) {
this.encryptionKey = keyBytes;
}
+ public String getKEKMetadata() {
+ return kekMetadata;
+ }
+
+ public void setKEKMetadata(String kekMetadata) {
+ this.kekMetadata = kekMetadata;
+ }
+
/**
* Extracts the major version for a 4-byte serialized version data. The major version is the 3
* least significant bytes
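For illustration, the three new trailer fields round-trip through the protobuf trailer
exactly like the existing wrapped key. A sketch (assuming the existing two-argument
FixedFileTrailer constructor; all values are placeholders):

  FixedFileTrailer trailer = new FixedFileTrailer(3, 3); // v3 trailers carry crypto metadata
  trailer.setKeyNamespace("t1/cf1");                     // table/family scope of the DEK
  trailer.setKEKChecksum(0xCAFEBABEL);                   // identifies the wrapping KEK
  trailer.setKEKMetadata("opaque-provider-metadata");
  // toProtobuf() serializes only the fields that are set;
  // deserializeFromPB() restores them on the read path.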
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index a99eac4085e4..6392b36ef12f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -41,9 +41,13 @@
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType;
import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.regionserver.CellSink;
import org.apache.hadoop.hbase.regionserver.ShipperListener;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -554,10 +558,18 @@ public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheCon
boolean primaryReplicaReader, Configuration conf) throws IOException {
Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf");
FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
+ KeyManagementService keyManagementService = SecurityUtil.isKeyManagementEnabled(conf)
+ ? KeyManagementService.createDefault(conf, fs)
+ : null;
+ ManagedKeyDataCache managedKeyDataCache =
+ keyManagementService != null ? keyManagementService.getManagedKeyDataCache() : null;
+ SystemKeyCache systemKeyCache =
+ keyManagementService != null ? keyManagementService.getSystemKeyCache() : null;
ReaderContext context =
new ReaderContextBuilder().withFilePath(path).withInputStreamWrapper(stream)
.withFileSize(fs.getFileStatus(path).getLen()).withFileSystem(stream.getHfs())
- .withPrimaryReplicaReader(primaryReplicaReader).withReaderType(ReaderType.PREAD).build();
+ .withPrimaryReplicaReader(primaryReplicaReader).withReaderType(ReaderType.PREAD)
+ .withManagedKeyDataCache(managedKeyDataCache).withSystemKeyCache(systemKeyCache).build();
HFileInfo fileInfo = new HFileInfo(context, conf);
Reader reader = createReader(context, fileInfo, cacheConf, conf);
fileInfo.initMetaAndIndex(reader);
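Call sites are unchanged: the caches are resolved inside createReader from the
configuration, so a standalone read still looks like the following sketch (conf and path
are assumed in scope):

  FileSystem fs = FileSystem.get(conf);
  // Caches are attached internally when key management is enabled in conf.
  HFile.Reader reader = HFile.createReader(fs, path, CacheConfig.DISABLED, true, conf);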
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
index 2386e8d82a56..b3da98f13434 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
@@ -22,7 +22,6 @@
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.SequenceInputStream;
-import java.security.Key;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
@@ -39,10 +38,8 @@
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.crypto.Cipher;
-import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -351,7 +348,7 @@ public void initTrailerAndContext(ReaderContext context, Configuration conf) thr
context.getInputStreamWrapper().getStream(isHBaseChecksum), context.getFileSize());
Path path = context.getFilePath();
checkFileVersion(path);
- this.hfileContext = createHFileContext(path, trailer, conf);
+ this.hfileContext = createHFileContext(context, path, trailer, conf);
context.getInputStreamWrapper().unbuffer();
} catch (Throwable t) {
IOUtils.closeQuietly(context.getInputStreamWrapper(),
@@ -409,30 +406,16 @@ public void initMetaAndIndex(HFile.Reader reader) throws IOException {
initialized = true;
}
- private HFileContext createHFileContext(Path path, FixedFileTrailer trailer, Configuration conf)
- throws IOException {
- HFileContextBuilder builder = new HFileContextBuilder().withHBaseCheckSum(true)
- .withHFileName(path.getName()).withCompression(trailer.getCompressionCodec())
+ private HFileContext createHFileContext(ReaderContext readerContext, Path path,
+ FixedFileTrailer trailer, Configuration conf) throws IOException {
+ return new HFileContextBuilder().withHBaseCheckSum(true).withHFileName(path.getName())
+ .withCompression(trailer.getCompressionCodec())
.withDecompressionContext(
trailer.getCompressionCodec().getHFileDecompressionContextForConfiguration(conf))
- .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName()));
- // Check for any key material available
- byte[] keyBytes = trailer.getEncryptionKey();
- if (keyBytes != null) {
- Encryption.Context cryptoContext = Encryption.newContext(conf);
- Key key = EncryptionUtil.unwrapKey(conf, keyBytes);
- // Use the algorithm the key wants
- Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm());
- if (cipher == null) {
- throw new IOException(
- "Cipher '" + key.getAlgorithm() + "' is not available" + ", path=" + path);
- }
- cryptoContext.setCipher(cipher);
- cryptoContext.setKey(key);
- builder.withEncryptionContext(cryptoContext);
- }
- HFileContext context = builder.build();
- return context;
+ .withCellComparator(FixedFileTrailer.createComparator(trailer.getComparatorClassName()))
+ .withEncryptionContext(SecurityUtil.createEncryptionContext(conf, path, trailer,
+ readerContext.getManagedKeyDataCache(), readerContext.getSystemKeyCache()))
+ .build();
}
private void loadMetaInfo(HFileBlock.BlockIterator blockIter, HFileContext hfileContext)
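The inline block removed above is the legacy branch that SecurityUtil.createEncryptionContext
still has to cover when the trailer carries no KEK information. Roughly (a sketch of the
legacy path only, not the actual SecurityUtil body; conf, path and trailer are assumed in
scope):

  Encryption.Context ctx = Encryption.newContext(conf);
  Key key = EncryptionUtil.unwrapKey(conf, trailer.getEncryptionKey());
  // Use the algorithm the key wants.
  Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm());
  if (cipher == null) {
    throw new IOException("Cipher '" + key.getAlgorithm() + "' is not available, path=" + path);
  }
  ctx.setCipher(cipher);
  ctx.setKey(key);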
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index 684aee3beaca..2b74d177a4fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
+import java.security.Key;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
@@ -48,6 +49,7 @@
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.IndexBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable;
@@ -877,12 +879,33 @@ protected void finishClose(FixedFileTrailer trailer) throws IOException {
// Write out encryption metadata before finalizing if we have a valid crypto context
Encryption.Context cryptoContext = hFileContext.getEncryptionContext();
if (cryptoContext != Encryption.Context.NONE) {
+ String wrapperSubject = null;
+ Key encKey = null;
+ Key wrapperKey = null;
+ ManagedKeyData kekData = cryptoContext.getKEKData();
+ String keyNamespace = cryptoContext.getKeyNamespace();
+ String kekMetadata = null;
+ long kekChecksum = 0;
+ if (kekData != null) {
+ kekMetadata = kekData.getKeyMetadata();
+ kekChecksum = kekData.getKeyChecksum();
+ wrapperKey = kekData.getTheKey();
+ encKey = cryptoContext.getKey();
+ } else {
+ wrapperSubject = cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,
+ User.getCurrent().getShortName());
+ encKey = cryptoContext.getKey();
+ }
// Wrap the context's key and write it as the encryption metadata, the wrapper includes
// all information needed for decryption
- trailer.setEncryptionKey(EncryptionUtil.wrapKey(
- cryptoContext.getConf(), cryptoContext.getConf()
- .get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),
- cryptoContext.getKey()));
+ if (encKey != null) {
+ byte[] wrappedKey =
+ EncryptionUtil.wrapKey(cryptoContext.getConf(), wrapperSubject, encKey, wrapperKey);
+ trailer.setEncryptionKey(wrappedKey);
+ }
+ trailer.setKeyNamespace(keyNamespace);
+ trailer.setKEKMetadata(kekMetadata);
+ trailer.setKEKChecksum(kekChecksum);
}
// Now we can finish the close
trailer.setMetaIndexCount(metaNames.size());
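Both branches above funnel into the same four-argument wrapKey call; only the wrapping
material differs. In summary (a sketch; conf, dek, kek and masterKeySubject are assumed
locals):

  // Managed path (kekData != null): DEK wrapped by the KEK, no subject needed.
  byte[] managed = EncryptionUtil.wrapKey(conf, null, dek, kek);
  // Legacy path (kekData == null): DEK wrapped under the configured master key subject.
  byte[] legacy = EncryptionUtil.wrapKey(conf, masterKeySubject, dek, null);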
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java
index d6f711d866eb..ac2031b723a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContext.java
@@ -21,6 +21,8 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -41,9 +43,12 @@ public enum ReaderType {
private final boolean primaryReplicaReader;
private final ReaderType type;
private final boolean preadAllBytes;
+ private final SystemKeyCache systemKeyCache;
+ private final ManagedKeyDataCache managedKeyDataCache;
public ReaderContext(Path filePath, FSDataInputStreamWrapper fsdis, long fileSize,
- HFileSystem hfs, boolean primaryReplicaReader, ReaderType type) {
+ HFileSystem hfs, boolean primaryReplicaReader, ReaderType type, SystemKeyCache systemKeyCache,
+ ManagedKeyDataCache managedKeyDataCache) {
this.filePath = filePath;
this.fsdis = fsdis;
this.fileSize = fileSize;
@@ -52,6 +57,8 @@ public ReaderContext(Path filePath, FSDataInputStreamWrapper fsdis, long fileSiz
this.type = type;
this.preadAllBytes = hfs.getConf().getBoolean(HConstants.HFILE_PREAD_ALL_BYTES_ENABLED_KEY,
HConstants.HFILE_PREAD_ALL_BYTES_ENABLED_DEFAULT);
+ this.systemKeyCache = systemKeyCache;
+ this.managedKeyDataCache = managedKeyDataCache;
}
public Path getFilePath() {
@@ -81,4 +88,12 @@ public ReaderType getReaderType() {
public boolean isPreadAllBytes() {
return preadAllBytes;
}
+
+ public SystemKeyCache getSystemKeyCache() {
+ return this.systemKeyCache;
+ }
+
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return this.managedKeyDataCache;
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java
index 718f7fcb78a6..1490299ab1f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ReaderContextBuilder.java
@@ -26,6 +26,8 @@
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -39,6 +41,8 @@ public class ReaderContextBuilder {
private HFileSystem hfs;
private boolean primaryReplicaReader = true;
private ReaderType type = ReaderType.PREAD;
+ private SystemKeyCache systemKeyCache;
+ private ManagedKeyDataCache managedKeyDataCache;
public ReaderContextBuilder() {
}
@@ -53,6 +57,8 @@ private ReaderContextBuilder(ReaderContext readerContext) {
this.fileSize = readerContext.getFileSize();
this.hfs = readerContext.getFileSystem();
this.type = readerContext.getReaderType();
+ this.systemKeyCache = readerContext.getSystemKeyCache();
+ this.managedKeyDataCache = readerContext.getManagedKeyDataCache();
}
public ReaderContextBuilder withFilePath(Path filePath) {
@@ -101,9 +107,20 @@ public ReaderContextBuilder withFileSystemAndPath(FileSystem fs, Path filePath)
return this;
}
+ public ReaderContextBuilder withManagedKeyDataCache(ManagedKeyDataCache managedKeyDataCache) {
+ this.managedKeyDataCache = managedKeyDataCache;
+ return this;
+ }
+
+ public ReaderContextBuilder withSystemKeyCache(SystemKeyCache systemKeyCache) {
+ this.systemKeyCache = systemKeyCache;
+ return this;
+ }
+
public ReaderContext build() {
validateFields();
- return new ReaderContext(filePath, fsdis, fileSize, hfs, primaryReplicaReader, type);
+ return new ReaderContext(filePath, fsdis, fileSize, hfs, primaryReplicaReader, type,
+ systemKeyCache, managedKeyDataCache);
}
private void validateFields() throws IllegalArgumentException {
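A reader context carrying the key caches can then be assembled as in this usage sketch
(the cache instances are obtained from a KeyManagementService as elsewhere in this patch):

  ReaderContext context = new ReaderContextBuilder()
    .withFileSystemAndPath(fs, path)          // sets stream, file size and hfs
    .withPrimaryReplicaReader(true)
    .withReaderType(ReaderType.PREAD)
    .withSystemKeyCache(systemKeyCache)
    .withManagedKeyDataCache(managedKeyDataCache)
    .build();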
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
index 1e4ee2a3e796..957c3c8f726d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
@@ -21,7 +21,6 @@
import java.security.KeyException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyProvider;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
@@ -38,20 +37,19 @@
public abstract class KeyManagementBase {
protected static final Logger LOG = LoggerFactory.getLogger(KeyManagementBase.class);
- private Server server;
+ private KeyManagementService keyManagementService;
private final Configuration configuration;
private Boolean isDynamicLookupEnabled;
private Boolean isKeyManagementEnabled;
- private Integer perCustNamespaceActiveKeyCount;
   /**
-   * Construct with a server instance. Configuration is derived from the server.
-   * @param server the server instance
+   * Construct with a key management service instance. Configuration is derived from the
+   * service.
+   * @param keyManagementService the key management service instance
    */
-  public KeyManagementBase(Server server) {
-    this(server.getConfiguration());
-    this.server = server;
+  public KeyManagementBase(KeyManagementService keyManagementService) {
+    this(keyManagementService.getConfiguration());
+    this.keyManagementService = keyManagementService;
   }
/**
@@ -65,8 +63,8 @@ public KeyManagementBase(Configuration configuration) {
this.configuration = configuration;
}
- protected Server getServer() {
- return server;
+ protected KeyManagementService getKeyManagementService() {
+ return keyManagementService;
}
protected Configuration getConfiguration() {
@@ -150,7 +148,7 @@ protected ManagedKeyData retrieveActiveKey(String encKeyCust, byte[] key_cust,
LOG.info(
"retrieveManagedKey: got managed key with status: {} and metadata: {} for "
+ "(custodian: {}, namespace: {})",
- pbeKey.getKeyState(), pbeKey.getKeyMetadata(), encKeyCust, keyNamespace);
+ pbeKey.getKeyState(), pbeKey.getKeyMetadata(), encKeyCust, pbeKey.getKeyNamespace());
if (accessor != null) {
accessor.addKey(pbeKey);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementService.java
new file mode 100644
index 000000000000..bdb76f5bbe6d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementService.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public interface KeyManagementService {
+ class DefaultKeyManagementService implements KeyManagementService {
+ private final Configuration configuration;
+ private final ManagedKeyDataCache managedKeyDataCache;
+ private final SystemKeyCache systemKeyCache;
+
+ public DefaultKeyManagementService(Configuration configuration, FileSystem fs) {
+ this.configuration = configuration;
+ this.managedKeyDataCache = new ManagedKeyDataCache(configuration, null);
+ try {
+ this.systemKeyCache = SystemKeyCache.createCache(configuration, fs);
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to create system key cache", e);
+ }
+ }
+
+ @Override
+ public SystemKeyCache getSystemKeyCache() {
+ return systemKeyCache;
+ }
+
+ @Override
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return managedKeyDataCache;
+ }
+
+ @Override
+ public KeymetaAdmin getKeymetaAdmin() {
+ throw new UnsupportedOperationException("KeymetaAdmin is not supported");
+ }
+
+ @Override
+ public Configuration getConfiguration() {
+ return configuration;
+ }
+ }
+
+ static KeyManagementService createDefault(Configuration configuration, FileSystem fs) {
+ return new DefaultKeyManagementService(configuration, fs);
+ }
+
+ /** Returns the cache for cluster keys. */
+ public SystemKeyCache getSystemKeyCache();
+
+ /** Returns the cache for managed keys. */
+ public ManagedKeyDataCache getManagedKeyDataCache();
+
+ /** Returns the admin for keymeta. */
+ public KeymetaAdmin getKeymetaAdmin();
+
+ /** Returns the configuration. */
+ public Configuration getConfiguration();
+}
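A standalone consumer (for example a tool running outside a Server) is expected to
bootstrap the service as in this sketch, mirroring the enablement check used elsewhere in
this patch:

  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  KeyManagementService kms = SecurityUtil.isKeyManagementEnabled(conf)
    ? KeyManagementService.createDefault(conf, fs)
    : null;
  if (kms != null) {
    SystemKeyCache systemKeys = kms.getSystemKeyCache();
    ManagedKeyDataCache managedKeys = kms.getManagedKeyDataCache();
    // Note: getKeymetaAdmin() throws UnsupportedOperationException on the default service.
  }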
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyNamespaceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyNamespaceUtil.java
new file mode 100644
index 000000000000..52b6adddc6f7
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyNamespaceUtil.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.regionserver.StoreContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+/**
+ * Utility class for constructing key namespaces used in key management operations.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class KeyNamespaceUtil {
+
+ /**
+ * Construct a key namespace from a table descriptor and column family descriptor.
+ * @param tableDescriptor The table descriptor
+ * @param family The column family descriptor
+ * @return The constructed key namespace
+ */
+ public static String constructKeyNamespace(TableDescriptor tableDescriptor,
+ ColumnFamilyDescriptor family) {
+ return tableDescriptor.getTableName().getNameAsString() + "/" + family.getNameAsString();
+ }
+
+ /**
+ * Construct a key namespace from a store context.
+ * @param storeContext The store context
+ * @return The constructed key namespace
+ */
+ public static String constructKeyNamespace(StoreContext storeContext) {
+ return storeContext.getTableName().getNameAsString() + "/"
+ + storeContext.getFamily().getNameAsString();
+ }
+
+ /**
+ * Construct a key namespace by deriving table name and family name from a store file info.
+ * @param fileInfo The store file info
+ * @return The constructed key namespace
+ */
+ public static String constructKeyNamespace(StoreFileInfo fileInfo) {
+ return constructKeyNamespace(
+ fileInfo.isLink() ? fileInfo.getLink().getOriginPath() : fileInfo.getPath());
+ }
+
+ /**
+ * Construct a key namespace by deriving table name and family name from a store file path.
+ * @param path The path
+ * @return The constructed key namespace
+ */
+ public static String constructKeyNamespace(Path path) {
+ return constructKeyNamespace(path.getParent().getParent().getParent().getName(),
+ path.getParent().getName());
+ }
+
+ /**
+ * Construct a key namespace from a table name and family name.
+ * @param tableName The table name
+ * @param family The family name
+ * @return The constructed key namespace
+ */
+ public static String constructKeyNamespace(String tableName, String family) {
+    // Precondition: neither argument may be null
+ Preconditions.checkNotNull(tableName, "tableName should not be null");
+ Preconditions.checkNotNull(family, "family should not be null");
+ return tableName + "/" + family;
+ }
+}
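All overloads normalize to the same "table/family" form; for a store file the names are
derived from the directory layout. For example (the path is illustrative):

  // "t1/cf1" from explicit names.
  String ns1 = KeyNamespaceUtil.constructKeyNamespace("t1", "cf1");
  // Same namespace derived from an hfile path .../data/default/t1/<region>/cf1/<hfile>.
  Path hfile = new Path("/hbase/data/default/t1/1588230740/cf1/abcdef0123456789");
  String ns2 = KeyNamespaceUtil.constructKeyNamespace(hfile);
  // ns1 and ns2 are both "t1/cf1".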
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
index 07b73376fa5f..4eb19a602cc0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java
@@ -137,8 +137,7 @@ public void getManagedKeys(RpcController controller, ManagedKeysRequest request,
@InterfaceAudience.Private
public static ManagedKeysResponse.Builder getResponseBuilder(RpcController controller,
ManagedKeysRequest request) {
- ManagedKeysResponse.Builder builder =
- ManagedKeysResponse.newBuilder().setKeyNamespace(request.getKeyNamespace());
+ ManagedKeysResponse.Builder builder = ManagedKeysResponse.newBuilder();
byte[] key_cust = convertToKeyCustBytes(controller, request, builder);
if (key_cust != null) {
builder.setKeyCustBytes(ByteString.copyFrom(key_cust));
@@ -152,9 +151,10 @@ public static GetManagedKeysResponse generateKeyStateResponse(
    List<ManagedKeyData> managedKeyStates, ManagedKeysResponse.Builder builder) {
GetManagedKeysResponse.Builder responseBuilder = GetManagedKeysResponse.newBuilder();
for (ManagedKeyData keyData : managedKeyStates) {
- builder.setKeyState(ManagedKeysProtos.ManagedKeyState.valueOf(keyData.getKeyState().getVal()))
- .setKeyMetadata(keyData.getKeyMetadata())
- .setRefreshTimestamp(keyData.getRefreshTimestamp());
+ builder
+ .setKeyState(ManagedKeysProtos.ManagedKeyState.forNumber(keyData.getKeyState().getVal()))
+ .setKeyMetadata(keyData.getKeyMetadata()).setRefreshTimestamp(keyData.getRefreshTimestamp())
+ .setKeyNamespace(keyData.getKeyNamespace());
responseBuilder.addState(builder.build());
}
return responseBuilder.build();
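Note the switch from valueOf(int) to forNumber(int): for protobuf-generated enums,
valueOf(int) is deprecated and forNumber returns null for unknown ordinals rather than
throwing, so callers can fail explicitly. A defensive sketch:

  ManagedKeysProtos.ManagedKeyState state =
    ManagedKeysProtos.ManagedKeyState.forNumber(keyData.getKeyState().getVal());
  if (state == null) {
    throw new IOException("Unknown managed key state: " + keyData.getKeyState());
  }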
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
index 6862e35ddf10..8e2a7095cfca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
@@ -21,7 +21,7 @@
import java.security.Key;
import java.security.KeyException;
import java.util.ArrayList;
-import java.util.HashSet;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -76,8 +76,15 @@ public class KeymetaTableAccessor extends KeyManagementBase {
public static final String KEY_STATE_QUAL_NAME = "k";
public static final byte[] KEY_STATE_QUAL_BYTES = Bytes.toBytes(KEY_STATE_QUAL_NAME);
+ private Server server;
+
public KeymetaTableAccessor(Server server) {
- super(server);
+ super(server.getKeyManagementService());
+ this.server = server;
+ }
+
+ public Server getServer() {
+ return server;
}
/**
@@ -113,8 +120,7 @@ public List<ManagedKeyData> getAllKeys(byte[] key_cust, String keyNamespace)
throws IOException, KeyException {
assertKeyManagementEnabled();
Connection connection = getServer().getConnection();
- byte[] prefixForScan =
- Bytes.add(Bytes.toBytes(key_cust.length), key_cust, Bytes.toBytes(keyNamespace));
+ byte[] prefixForScan = constructRowKeyForCustNamespace(key_cust, keyNamespace);
PrefixFilter prefixFilter = new PrefixFilter(prefixForScan);
Scan scan = new Scan();
scan.setFilter(prefixFilter);
@@ -122,9 +128,10 @@ public List getAllKeys(byte[] key_cust, String keyNamespace)
try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
ResultScanner scanner = table.getScanner(scan);
-    Set<ManagedKeyData> allKeys = new HashSet<>();
+    Set<ManagedKeyData> allKeys = new LinkedHashSet<>();
for (Result result : scanner) {
- ManagedKeyData keyData = parseFromResult(getServer(), key_cust, keyNamespace, result);
+ ManagedKeyData keyData =
+ parseFromResult(getKeyManagementService(), key_cust, keyNamespace, result);
if (keyData != null) {
allKeys.add(keyData);
}
@@ -147,11 +154,10 @@ public ManagedKeyData getActiveKey(byte[] key_cust, String keyNamespace)
Connection connection = getServer().getConnection();
byte[] rowkeyForGet = constructRowKeyForCustNamespace(key_cust, keyNamespace);
Get get = new Get(rowkeyForGet);
- get.addColumn(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES);
try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
Result result = table.get(get);
- return parseFromResult(getServer(), key_cust, keyNamespace, result);
+ return parseFromResult(getKeyManagementService(), key_cust, keyNamespace, result);
}
}
@@ -200,7 +206,7 @@ private ManagedKeyData getKeyInternal(byte[] key_cust, String keyNamespace,
try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
byte[] rowKey = constructRowKeyForMetadata(key_cust, keyNamespace, keyMetadataHash);
Result result = table.get(new Get(rowKey));
- return parseFromResult(getServer(), key_cust, keyNamespace, result);
+ return parseFromResult(getKeyManagementService(), key_cust, keyNamespace, result);
}
}
@@ -208,10 +214,11 @@ private ManagedKeyData getKeyInternal(byte[] key_cust, String keyNamespace,
* Add the mutation columns to the given Put that are derived from the keyData.
*/
private Put addMutationColumns(Put put, ManagedKeyData keyData) throws IOException {
- ManagedKeyData latestSystemKey = getServer().getSystemKeyCache().getLatestSystemKey();
+ ManagedKeyData latestSystemKey =
+ getKeyManagementService().getSystemKeyCache().getLatestSystemKey();
if (keyData.getTheKey() != null) {
- byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getServer().getConfiguration(), null,
- keyData.getTheKey(), latestSystemKey.getTheKey());
+ byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getConfiguration(), null, keyData.getTheKey(),
+ latestSystemKey.getTheKey());
put
.addColumn(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES,
Bytes.toBytes(keyData.getKeyChecksum()))
@@ -261,12 +268,12 @@ public static byte[] constructRowKeyForCustNamespace(ManagedKeyData keyData) {
@InterfaceAudience.Private
public static byte[] constructRowKeyForCustNamespace(byte[] key_cust, String keyNamespace) {
int custLength = key_cust.length;
- return Bytes.add(Bytes.toBytes(custLength), key_cust, Bytes.toBytesBinary(keyNamespace));
+ return Bytes.add(Bytes.toBytes(custLength), key_cust, Bytes.toBytes(keyNamespace));
}
@InterfaceAudience.Private
- public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, String keyNamespace,
- Result result) throws IOException, KeyException {
+ public static ManagedKeyData parseFromResult(KeyManagementService keyManagementService,
+ byte[] key_cust, String keyNamespace, Result result) throws IOException, KeyException {
if (result == null || result.isEmpty()) {
return null;
}
@@ -285,13 +292,14 @@ public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, Str
if (dekWrappedByStk != null) {
long stkChecksum =
Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES));
- ManagedKeyData clusterKey = server.getSystemKeyCache().getSystemKeyByChecksum(stkChecksum);
+ ManagedKeyData clusterKey =
+ keyManagementService.getSystemKeyCache().getSystemKeyByChecksum(stkChecksum);
if (clusterKey == null) {
LOG.error("Dropping key with metadata: {} as STK with checksum: {} is unavailable",
dekMetadata, stkChecksum);
return null;
}
- dek = EncryptionUtil.unwrapKey(server.getConfiguration(), null, dekWrappedByStk,
+ dek = EncryptionUtil.unwrapKey(keyManagementService.getConfiguration(), null, dekWrappedByStk,
clusterKey.getTheKey());
}
long refreshedTimestamp =
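Given constructRowKeyForCustNamespace above, the keymeta row key layout is a 4-byte
custodian length, the custodian bytes, then the namespace bytes, so a prefix scan over
custodian plus namespace covers all metadata-hash suffixed rows. A sketch:

  byte[] keyCust = Bytes.toBytes("cust1");
  byte[] rowPrefix = KeymetaTableAccessor.constructRowKeyForCustNamespace(keyCust, "t1/cf1");
  // Equivalent layout:
  byte[] expected = Bytes.add(Bytes.toBytes(keyCust.length), keyCust, Bytes.toBytes("t1/cf1"));
  // Bytes.equals(rowPrefix, expected) holds.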
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
index ecac8e1a2857..8de01319e25b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
@@ -25,6 +25,7 @@
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -35,14 +36,24 @@
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
public class SystemKeyAccessor extends KeyManagementBase {
+ private static final Logger LOG = LoggerFactory.getLogger(SystemKeyAccessor.class);
+
+ private final FileSystem fs;
protected final Path systemKeyDir;
public SystemKeyAccessor(Server server) throws IOException {
- super(server);
- this.systemKeyDir = CommonFSUtils.getSystemKeyDir(server.getConfiguration());
+ this(server.getConfiguration(), server.getFileSystem());
+ }
+
+ public SystemKeyAccessor(Configuration configuration, FileSystem fs) throws IOException {
+ super(configuration);
+ this.systemKeyDir = CommonFSUtils.getSystemKeyDir(configuration);
+ this.fs = fs;
}
/**
@@ -52,9 +63,7 @@ public SystemKeyAccessor(Server server) throws IOException {
* is initialized yet.
*/
  public Pair<Path, List<Path>> getLatestSystemKeyFile() throws IOException {
- if (!isKeyManagementEnabled()) {
- return new Pair<>(null, null);
- }
+ assertKeyManagementEnabled();
    List<Path> allClusterKeyFiles = getAllSystemKeyFiles();
if (allClusterKeyFiles.isEmpty()) {
throw new RuntimeException("No cluster key initialized yet");
@@ -72,17 +81,15 @@ public Pair> getLatestSystemKeyFile() throws IOException {
* @throws IOException if there is an error getting the cluster key files
*/
  public List<Path> getAllSystemKeyFiles() throws IOException {
- if (!isKeyManagementEnabled()) {
- return null;
- }
- FileSystem fs = getServer().getFileSystem();
+ assertKeyManagementEnabled();
+ LOG.info("Getting all system key files from: {} matching prefix: {}", systemKeyDir,
+ SYSTEM_KEY_FILE_PREFIX + "*");
    Map<Integer, Path> clusterKeys = new TreeMap<>(Comparator.reverseOrder());
for (FileStatus st : fs.globStatus(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))) {
Path keyPath = st.getPath();
int seqNum = extractSystemKeySeqNum(keyPath);
clusterKeys.put(seqNum, keyPath);
}
-
return new ArrayList<>(clusterKeys.values());
}
@@ -130,7 +137,7 @@ public static int extractKeySequence(Path clusterKeyFile) throws IOException {
}
protected String loadKeyMetadata(Path keyPath) throws IOException {
- try (FSDataInputStream fin = getServer().getFileSystem().open(keyPath)) {
+ try (FSDataInputStream fin = fs.open(keyPath)) {
return fin.readUTF();
}
}
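Because key files are collected into a TreeMap with a reverse-order comparator on their
sequence number, getAllSystemKeyFiles() lists the newest file first, and
getLatestSystemKeyFile() presumably pairs that file with the full list. A sketch (assuming
key management is enabled and at least one key exists):

  SystemKeyAccessor accessor = new SystemKeyAccessor(conf, fs);
  List<Path> files = accessor.getAllSystemKeyFiles();                // newest first
  Pair<Path, List<Path>> latest = accessor.getLatestSystemKeyFile(); // throws if none exist
  // latest.getFirst() equals files.get(0).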
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
index bb7a6e3f6935..bcdf2ae11cf0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
@@ -21,6 +21,8 @@
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.yetus.audience.InterfaceAudience;
@@ -35,6 +37,19 @@ public class SystemKeyCache {
private final ManagedKeyData latestSystemKey;
  private final Map<Long, ManagedKeyData> systemKeys;
+ /**
+ * Create a SystemKeyCache from the specified configuration and file system.
+ * @param configuration the configuration to use
+ * @param fs the file system to use
+ * @return the cache or {@code null} if no keys are found.
+ * @throws IOException if there is an error loading the system keys
+ */
+ public static SystemKeyCache createCache(Configuration configuration, FileSystem fs)
+ throws IOException {
+ SystemKeyAccessor accessor = new SystemKeyAccessor(configuration, fs);
+ return createCache(accessor);
+ }
+
/**
* Construct the System Key cache from the specified accessor.
* @param accessor the accessor to use to load the system keys
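The new factory lets non-Server callers build the cache directly; lookups are by system
key checksum, which is how KeymetaTableAccessor resolves the wrapping key. A usage sketch:

  SystemKeyCache cache = SystemKeyCache.createCache(conf, fs); // null if no keys found
  if (cache != null) {
    ManagedKeyData latest = cache.getLatestSystemKey();
    // Resolves the same key by its checksum, as done when unwrapping DEKs.
    ManagedKeyData byChecksum = cache.getSystemKeyByChecksum(latest.getKeyChecksum());
  }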
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 0573b1a75628..dee9b48f9ea5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.hbck.HbckChore;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
@@ -67,7 +68,7 @@
* adding API. Changes cause ripples through the code base.
*/
@InterfaceAudience.Private
-public interface MasterServices extends Server {
+public interface MasterServices extends Server, KeyManagementService {
/** Returns the underlying snapshot manager */
SnapshotManager getSnapshotManager();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
index 2ca423bad8e9..de0e37dde275 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
@@ -30,9 +30,13 @@
import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
public class SystemKeyManager extends SystemKeyAccessor {
+ private static final Logger LOG = LoggerFactory.getLogger(SystemKeyManager.class);
+
private final MasterServices master;
public SystemKeyManager(MasterServices master) throws IOException {
@@ -63,7 +67,7 @@ public ManagedKeyData rotateSystemKeyIfChanged() throws IOException {
return null;
}
    Pair<Path, List<Path>> latestFileResult = getLatestSystemKeyFile();
- Path latestFile = getLatestSystemKeyFile().getFirst();
+ Path latestFile = latestFileResult.getFirst();
String latestKeyMetadata = loadKeyMetadata(latestFile);
return rotateSystemKey(latestKeyMetadata, latestFileResult.getSecond());
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 423297f667d3..59a8285b2f65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -360,8 +360,7 @@ public List<RegionInfo> createHdfsRegions(final MasterProcedureEnv env,
throws IOException {
RegionInfo[] regions =
newRegions != null ? newRegions.toArray(new RegionInfo[newRegions.size()]) : null;
- return ModifyRegionUtils.createRegions(env.getMasterConfiguration(), tableRootDir,
- tableDescriptor, regions, null);
+ return ModifyRegionUtils.createRegions(env, tableRootDir, tableDescriptor, regions, null);
}
});
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index 8b4901e90e85..2d54eaf6c58c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -75,10 +75,10 @@ public TableOperationType getTableOperationType() {
return TableOperationType.CREATE;
}
- private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf)
+ private static TableDescriptor writeFsLayout(Path rootDir, MasterProcedureEnv env)
throws IOException {
LOG.info("BOOTSTRAP: creating hbase:meta region");
- FileSystem fs = rootDir.getFileSystem(conf);
+ FileSystem fs = rootDir.getFileSystem(env.getMasterConfiguration());
Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
if (fs.exists(tableDir) && !deleteMetaTableDirectoryIfPartial(fs, tableDir)) {
LOG.warn("Can not delete partial created meta table, continue...");
@@ -87,10 +87,11 @@ private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf)
// created here in bootstrap and it'll need to be cleaned up. Better to
// not make it in first place. Turn off block caching for bootstrap.
// Enable after.
- TableDescriptor metaDescriptor =
- FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir);
+ TableDescriptor metaDescriptor = FSTableDescriptors
+ .tryUpdateAndGetMetaTableDescriptor(env.getMasterConfiguration(), fs, rootDir);
HRegion
- .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null)
+ .createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, env.getMasterConfiguration(),
+ metaDescriptor, null, env.getMasterServices().getKeyManagementService())
.close();
return metaDescriptor;
}
@@ -104,7 +105,7 @@ protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state)
case INIT_META_WRITE_FS_LAYOUT:
Configuration conf = env.getMasterConfiguration();
Path rootDir = CommonFSUtils.getRootDir(conf);
- TableDescriptor td = writeFsLayout(rootDir, conf);
+ TableDescriptor td = writeFsLayout(rootDir, env);
env.getMasterServices().getTableDescriptors().update(td, true);
setNextState(InitMetaState.INIT_META_ASSIGN_META);
return Flow.HAS_MORE_STATE;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
index 97447e37b7c4..0539fb6250a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegion.java
@@ -29,7 +29,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ConnectionUtils;
@@ -44,6 +43,7 @@
import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.log.HBaseMarkers;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -114,7 +114,7 @@ public final class MasterRegion {
private static final int REGION_ID = 1;
- private final Server server;
+ private final MasterServices server;
private final WALFactory walFactory;
@@ -128,7 +128,7 @@ public final class MasterRegion {
private final long regionUpdateRetryPauseTime;
- private MasterRegion(Server server, HRegion region, WALFactory walFactory,
+ private MasterRegion(MasterServices server, HRegion region, WALFactory walFactory,
MasterRegionFlusherAndCompactor flusherAndCompactor, MasterRegionWALRoller walRoller) {
this.server = server;
this.region = region;
@@ -301,14 +301,15 @@ private static WAL createWAL(WALFactory walFactory, MasterRegionWALRoller walRol
private static HRegion bootstrap(Configuration conf, TableDescriptor td, FileSystem fs,
Path rootDir, FileSystem walFs, Path walRootDir, WALFactory walFactory,
- MasterRegionWALRoller walRoller, String serverName, boolean touchInitializingFlag)
+ MasterRegionWALRoller walRoller, MasterServices server, boolean touchInitializingFlag)
throws IOException {
TableName tn = td.getTableName();
RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tn).setRegionId(REGION_ID).build();
Path tableDir = CommonFSUtils.getTableDir(rootDir, tn);
// persist table descriptor
FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, td, true);
- HRegion.createHRegion(conf, regionInfo, fs, tableDir, td).close();
+ HRegion.createHRegion(conf, regionInfo, fs, tableDir, td, server.getKeyManagementService())
+ .close();
Path initializedFlag = new Path(tableDir, INITIALIZED_FLAG);
if (!fs.mkdirs(initializedFlag)) {
throw new IOException("Can not touch initialized flag: " + initializedFlag);
@@ -317,8 +318,10 @@ private static HRegion bootstrap(Configuration conf, TableDescriptor td, FileSys
if (!fs.delete(initializingFlag, true)) {
LOG.warn("failed to clean up initializing flag: " + initializingFlag);
}
- WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, regionInfo);
- return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null);
+ WAL wal = createWAL(walFactory, walRoller, server.getServerName().toString(), walFs, walRootDir,
+ regionInfo);
+ return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null,
+ server.getKeyManagementService());
}
private static RegionInfo loadRegionInfo(FileSystem fs, Path tableDir) throws IOException {
@@ -330,7 +333,7 @@ private static RegionInfo loadRegionInfo(FileSystem fs, Path tableDir) throws IO
private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo regionInfo,
FileSystem fs, Path rootDir, FileSystem walFs, Path walRootDir, WALFactory walFactory,
- MasterRegionWALRoller walRoller, String serverName) throws IOException {
+ MasterRegionWALRoller walRoller, MasterServices server) throws IOException {
Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName());
Path walRegionDir = FSUtils.getRegionDirFromRootDir(walRootDir, regionInfo);
Path replayEditsDir = new Path(walRegionDir, REPLAY_EDITS_DIR);
@@ -346,7 +349,8 @@ private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo r
// to always exist in normal situations, but we should guard against users changing the
// filesystem outside of HBase's line of sight.
if (walFs.exists(walsDir)) {
- replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, serverName, replayEditsDir);
+ replayWALs(conf, walFs, walRootDir, walsDir, regionInfo, server.getServerName().toString(),
+ replayEditsDir);
} else {
LOG.error(
"UNEXPECTED: WAL directory for MasterRegion is missing." + " {} is unexpectedly missing.",
@@ -354,13 +358,15 @@ private static HRegion open(Configuration conf, TableDescriptor td, RegionInfo r
}
// Create a new WAL
- WAL wal = createWAL(walFactory, walRoller, serverName, walFs, walRootDir, regionInfo);
+ WAL wal = createWAL(walFactory, walRoller, server.getServerName().toString(), walFs, walRootDir,
+ regionInfo);
conf.set(HRegion.SPECIAL_RECOVERED_EDITS_DIR,
replayEditsDir.makeQualified(walFs.getUri(), walFs.getWorkingDirectory()).toString());
// we do not do WAL splitting here so it is possible to have uncleanly closed WAL files, so we
// need to ignore EOFException.
conf.setBoolean(HRegion.RECOVERED_EDITS_IGNORE_EOF, true);
- return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null);
+ return HRegion.openHRegionFromTableDir(conf, fs, tableDir, regionInfo, td, wal, null, null,
+ server);
}
private static void replayWALs(Configuration conf, FileSystem walFs, Path walRootDir,
@@ -437,7 +443,7 @@ private static void tryMigrate(Configuration conf, FileSystem fs, Path tableDir,
public static MasterRegion create(MasterRegionParams params) throws IOException {
TableDescriptor td = params.tableDescriptor();
LOG.info("Create or load local region for table " + td);
- Server server = params.server();
+ MasterServices server = params.server();
Configuration baseConf = server.getConfiguration();
FileSystem fs = CommonFSUtils.getRootDirFileSystem(baseConf);
FileSystem walFs = CommonFSUtils.getWALFileSystem(baseConf);
@@ -476,8 +482,8 @@ public static MasterRegion create(MasterRegionParams params) throws IOException
if (!fs.mkdirs(initializedFlag)) {
throw new IOException("Can not touch initialized flag");
}
- region = bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
- server.getServerName().toString(), true);
+ region =
+ bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller, server, true);
} else {
if (!fs.exists(initializedFlag)) {
if (!fs.exists(initializingFlag)) {
@@ -495,7 +501,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException
RegionInfo regionInfo = loadRegionInfo(fs, tableDir);
tryMigrate(conf, fs, tableDir, regionInfo, oldTd, td);
region = open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
- server.getServerName().toString());
+ server);
} else {
// delete all contents besides the initializing flag, here we can make sure tableDir
// exists(unless someone delete it manually...), so we do not do null check here.
@@ -505,7 +511,7 @@ public static MasterRegion create(MasterRegionParams params) throws IOException
}
}
region = bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
- server.getServerName().toString(), false);
+ server, false);
}
} else {
if (fs.exists(initializingFlag) && !fs.delete(initializingFlag, true)) {
@@ -515,8 +521,8 @@ public static MasterRegion create(MasterRegionParams params) throws IOException
TableDescriptor oldTd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
RegionInfo regionInfo = loadRegionInfo(fs, tableDir);
tryMigrate(conf, fs, tableDir, regionInfo, oldTd, td);
- region = open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
- server.getServerName().toString());
+ region =
+ open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller, server);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
index 71fb76bd0f1b..878f8dc17a1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionFactory.java
@@ -21,12 +21,12 @@
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
@@ -113,7 +113,7 @@ private static TableDescriptor withTrackerConfigs(Configuration conf) {
return tracker.updateWithTrackerConfigs(TableDescriptorBuilder.newBuilder(TABLE_DESC)).build();
}
- public static MasterRegion create(Server server) throws IOException {
+ public static MasterRegion create(MasterServices server) throws IOException {
Configuration conf = server.getConfiguration();
MasterRegionParams params = new MasterRegionParams().server(server)
.regionDirName(MASTER_STORE_DIR).tableDescriptor(withTrackerConfigs(conf));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
index b9065747b669..443bca9f8c97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/region/MasterRegionParams.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hbase.master.region;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -27,7 +27,7 @@
@InterfaceAudience.Private
public class MasterRegionParams {
- private Server server;
+ private MasterServices server;
private String regionDirName;
@@ -55,7 +55,7 @@ public class MasterRegionParams {
private Boolean useMetaCellComparator;
- public MasterRegionParams server(Server server) {
+ public MasterRegionParams server(MasterServices server) {
this.server = server;
return this;
}
@@ -125,7 +125,7 @@ public MasterRegionParams useMetaCellComparator(boolean useMetaCellComparator) {
return this;
}
- public Server server() {
+ public MasterServices server() {
return server;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7936197ff8d8..99aca4f6abde 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -90,6 +90,7 @@
import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
@@ -146,6 +147,9 @@
import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerCall;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -166,6 +170,7 @@
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.replication.ReplicationUtils;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
@@ -382,6 +387,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
private final Configuration baseConf;
private final int rowLockWaitDuration;
static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
+ private final ManagedKeyDataCache managedKeyDataCache;
+ private final SystemKeyCache systemKeyCache;
private Path regionWalDir;
private FileSystem walFS;
@@ -769,8 +776,36 @@ void sawNoSuchFamily() {
public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd,
final RegionServerServices rsServices) {
+ this(tableDir, wal, fs, confParam, regionInfo, htd, rsServices, null);
+ }
+
+ /**
+ * HRegion constructor. This constructor should only be used for testing and extensions. Instances
+ * of HRegion should be instantiated with the {@link HRegion#createHRegion} or
+ * {@link HRegion#openHRegion} method.
+ * @param tableDir qualified path of directory where region should be located, usually
+ * the table directory.
+   * @param wal                 The WAL is the outbound log for any updates to the HRegion. The wal
+ * file is a logfile from the previous execution that's
+ * custom-computed for this HRegion. The HRegionServer computes and
+ * sorts the appropriate wal info for this HRegion. If there is a
+ * previous wal file (implying that the HRegion has been written-to
+ * before), then read it from the supplied path.
+ * @param fs is the filesystem.
+ * @param confParam is global configuration settings.
+   * @param regionInfo          RegionInfo that describes the region
+ * @param htd the table descriptor
+ * @param rsServices reference to {@link RegionServerServices} or null
+ * @param keyManagementService reference to {@link KeyManagementService} or null
+ * @deprecated Use other constructors.
+ */
+ @Deprecated
+ public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
+ final Configuration confParam, final RegionInfo regionInfo, final TableDescriptor htd,
+ final RegionServerServices rsServices, final KeyManagementService keyManagementService) {
this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo), wal, confParam, htd,
- rsServices);
+ rsServices, keyManagementService);
}
/**
@@ -789,6 +824,28 @@ public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
*/
public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam,
final TableDescriptor htd, final RegionServerServices rsServices) {
+ this(fs, wal, confParam, htd, rsServices, null);
+ }
+
+ /**
+ * HRegion constructor. This constructor should only be used for testing and extensions. Instances
+ * of HRegion should be instantiated with the {@link HRegion#createHRegion} or
+ * {@link HRegion#openHRegion} method.
+ * @param fs is the filesystem.
+   * @param wal The WAL is the outbound log for any updates to the HRegion. The wal
+ * file is a logfile from the previous execution that's
+ * custom-computed for this HRegion. The HRegionServer computes and
+ * sorts the appropriate wal info for this HRegion. If there is a
+ * previous wal file (implying that the HRegion has been written-to
+ * before), then read it from the supplied path.
+ * @param confParam is global configuration settings.
+ * @param htd the table descriptor
+ * @param rsServices reference to {@link RegionServerServices} or null
+ * @param keyManagementService reference to {@link KeyManagementService} or null
+ */
+ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration confParam,
+ final TableDescriptor htd, final RegionServerServices rsServices,
+ KeyManagementService keyManagementService) {
if (htd == null) {
throw new IllegalArgumentException("Need table descriptor");
}
@@ -929,6 +986,17 @@ public HRegion(final HRegionFileSystem fs, final WAL wal, final Configuration co
minBlockSizeBytes = Arrays.stream(this.htableDescriptor.getColumnFamilies())
.mapToInt(ColumnFamilyDescriptor::getBlocksize).min().orElse(HConstants.DEFAULT_BLOCKSIZE);
+
+ if (SecurityUtil.isKeyManagementEnabled(conf)) {
+ if (keyManagementService == null) {
+ keyManagementService = KeyManagementService.createDefault(conf, fs.getFileSystem());
+ }
+ this.managedKeyDataCache = keyManagementService.getManagedKeyDataCache();
+ this.systemKeyCache = keyManagementService.getSystemKeyCache();
+ } else {
+ this.managedKeyDataCache = null;
+ this.systemKeyCache = null;
+ }
}
private void setHTableSpecificConf() {
@@ -2122,6 +2190,14 @@ public BlockCache getBlockCache() {
return this.blockCache;
}
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return this.managedKeyDataCache;
+ }
+
+ public SystemKeyCache getSystemKeyCache() {
+ return this.systemKeyCache;
+ }
+
/**
* Only used for unit test which doesn't start region server.
*/
@@ -7579,37 +7655,60 @@ public String toString() {
}
// Utility methods
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+ public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf,
+ RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) {
+ return newHRegion(tableDir, wal, fs, conf, regionInfo, htd, rsServices, null);
+ }
+
/**
* A utility method to create new instances of HRegion based on the {@link HConstants#REGION_IMPL}
* configuration property.
- * @param tableDir qualified path of directory where region should be located, usually the table
- * directory.
- * @param wal The WAL is the outbound log for any updates to the HRegion The wal file is a
- * logfile from the previous execution that's custom-computed for this HRegion.
- * The HRegionServer computes and sorts the appropriate wal info for this
- * HRegion. If there is a previous file (implying that the HRegion has been
- * written-to before), then read it from the supplied path.
- * @param fs is the filesystem.
- * @param conf is global configuration settings.
- * @param regionInfo - RegionInfo that describes the region is new), then read them from the
- * supplied path.
- * @param htd the table descriptor
+ * @param tableDir qualified path of directory where region should be located, usually
+ * the table directory.
+   * @param wal The WAL is the outbound log for any updates to the HRegion. The wal
+ * file is a logfile from the previous execution that's
+ * custom-computed for this HRegion. The HRegionServer computes and
+ * sorts the appropriate wal info for this HRegion. If there is a
+ * previous file (implying that the HRegion has been written-to
+ * before), then read it from the supplied path.
+ * @param fs is the filesystem.
+ * @param conf is global configuration settings.
+   * @param regionInfo - RegionInfo that describes the region
+ * @param htd the table descriptor
+ * @param keyManagementService reference to {@link KeyManagementService} or null
* @return the new instance
*/
public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf,
- RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) {
+ RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices,
+ final KeyManagementService keyManagementService) {
+    List<Class<?>> ctorArgTypes =
+      Arrays.asList(Path.class, WAL.class, FileSystem.class, Configuration.class, RegionInfo.class,
+        TableDescriptor.class, RegionServerServices.class, KeyManagementService.class);
+    List<Object> ctorArgs =
+      Arrays.asList(tableDir, wal, fs, conf, regionInfo, htd, rsServices, keyManagementService);
+
+ try {
+ return createInstance(conf, ctorArgTypes, ctorArgs);
+ } catch (Throwable e) {
+ // Try the old signature for the sake of test code.
+ return createInstance(conf, ctorArgTypes.subList(0, ctorArgTypes.size() - 1),
+ ctorArgs.subList(0, ctorArgs.size() - 1));
+ }
+ }
+
+  private static HRegion createInstance(Configuration conf, List<Class<?>> ctorArgTypes,
+    List<Object> ctorArgs) {
try {
@SuppressWarnings("unchecked")
      Class<? extends HRegion> regionClass =
        (Class<? extends HRegion>) conf.getClass(HConstants.REGION_IMPL, HRegion.class);
      Constructor<? extends HRegion> c =
- regionClass.getConstructor(Path.class, WAL.class, FileSystem.class, Configuration.class,
- RegionInfo.class, TableDescriptor.class, RegionServerServices.class);
-
- return c.newInstance(tableDir, wal, fs, conf, regionInfo, htd, rsServices);
+        regionClass.getConstructor(ctorArgTypes.toArray(new Class<?>[ctorArgTypes.size()]));
+ return c.newInstance(ctorArgs.toArray(new Object[ctorArgs.size()]));
} catch (Throwable e) {
- // todo: what should I throw here?
throw new IllegalStateException("Could not instantiate a region instance.", e);
}
}
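
For illustration, a custom region class wired in via HConstants.REGION_IMPL only needs one of the two constructor shapes; the helper above tries the new eight-argument signature first and falls back to the legacy seven-argument one. A minimal sketch, assuming a hypothetical subclass MyRegion:

  public class MyRegion extends HRegion {
    // New-style constructor: newHRegion() passes the KeyManagementService through
    // when one is available; it may be null when key management is disabled.
    public MyRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf,
      RegionInfo regionInfo, TableDescriptor htd, RegionServerServices rsServices,
      KeyManagementService keyManagementService) {
      super(tableDir, wal, fs, conf, regionInfo, htd, rsServices, keyManagementService);
    }
  }

  // Selecting the implementation, e.g. in a test:
  // conf.setClass(HConstants.REGION_IMPL, MyRegion.class, HRegion.class);
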
@@ -7622,6 +7721,7 @@ public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configur
* @param initialize - true to initialize the region
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize) throws IOException {
@@ -7637,16 +7737,35 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
* @param rsRpcServices An interface we can request flushes against.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal,
final boolean initialize, RegionServerServices rsRpcServices) throws IOException {
+ return createHRegion(info, rootDir, conf, hTableDescriptor, wal, initialize, rsRpcServices,
+ null);
+ }
+
+ /**
+ * Convenience method creating new HRegions. Used by createTable.
+ * @param info Info for region to create.
+ * @param rootDir Root directory for HBase instance
+ * @param wal shared WAL
+ * @param initialize - true to initialize the region
+ * @param rsRpcServices An interface we can request flushes against.
+ * @param keyManagementService reference to {@link KeyManagementService} or null
+ * @return new HRegion
+ */
+ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
+ final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal,
+ final boolean initialize, RegionServerServices rsRpcServices,
+ final KeyManagementService keyManagementService) throws IOException {
LOG.info("creating " + info + ", tableDescriptor="
+ (hTableDescriptor == null ? "null" : hTableDescriptor) + ", regionDir=" + rootDir);
createRegionDir(conf, info, rootDir);
FileSystem fs = rootDir.getFileSystem(conf);
Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable());
- HRegion region =
- HRegion.newHRegion(tableDir, wal, fs, conf, info, hTableDescriptor, rsRpcServices);
+ HRegion region = HRegion.newHRegion(tableDir, wal, fs, conf, info, hTableDescriptor,
+ rsRpcServices, keyManagementService);
if (initialize) {
region.initialize(null);
}
@@ -7657,11 +7776,13 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
* Create a region under the given table directory.
*/
public static HRegion createHRegion(Configuration conf, RegionInfo regionInfo, FileSystem fs,
- Path tableDir, TableDescriptor tableDesc) throws IOException {
+ Path tableDir, TableDescriptor tableDesc, KeyManagementService keyManagementService)
+ throws IOException {
LOG.info("Creating {}, tableDescriptor={}, under table dir {}", regionInfo, tableDesc,
tableDir);
HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo);
- HRegion region = HRegion.newHRegion(tableDir, null, fs, conf, regionInfo, tableDesc, null);
+ HRegion region = HRegion.newHRegion(tableDir, null, fs, conf, regionInfo, tableDesc, null,
+ keyManagementService);
return region;
}
@@ -7680,7 +7801,14 @@ public static HRegionFileSystem createRegionDir(Configuration configuration, Reg
public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal)
throws IOException {
- return createHRegion(info, rootDir, conf, hTableDescriptor, wal, true);
+ return createHRegion(info, rootDir, conf, hTableDescriptor, wal, null);
+ }
+
+ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
+ final Configuration conf, final TableDescriptor hTableDescriptor, final WAL wal,
+ final KeyManagementService keyManagementService) throws IOException {
+ return createHRegion(info, rootDir, conf, hTableDescriptor, wal, true, null,
+ keyManagementService);
}
/**
@@ -7691,6 +7819,7 @@ public static HRegion createHRegion(final RegionInfo info, final Path rootDir,
* properly kept up. HRegionStore does this every time it opens a new region.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal,
final Configuration conf) throws IOException {
return openHRegion(info, htd, wal, conf, null, null);
@@ -7712,7 +7841,8 @@ public static HRegion openHRegion(final RegionInfo info, final TableDescriptor h
public static HRegion openHRegion(final RegionInfo info, final TableDescriptor htd, final WAL wal,
final Configuration conf, final RegionServerServices rsServices,
final CancelableProgressable reporter) throws IOException {
- return openHRegion(CommonFSUtils.getRootDir(conf), info, htd, wal, conf, rsServices, reporter);
+ return openHRegion(CommonFSUtils.getRootDir(conf), info, htd, wal, conf, rsServices, reporter,
+ rsServices);
}
/**
@@ -7726,9 +7856,10 @@ public static HRegion openHRegion(final RegionInfo info, final TableDescriptor h
* @param conf The Configuration object to use.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(Path rootDir, final RegionInfo info, final TableDescriptor htd,
final WAL wal, final Configuration conf) throws IOException {
- return openHRegion(rootDir, info, htd, wal, conf, null, null);
+ return openHRegion(rootDir, info, htd, wal, conf, null, null, null);
}
/**
@@ -7745,10 +7876,33 @@ public static HRegion openHRegion(Path rootDir, final RegionInfo info, final Tab
* @param reporter An interface we can report progress against.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
final TableDescriptor htd, final WAL wal, final Configuration conf,
final RegionServerServices rsServices, final CancelableProgressable reporter)
throws IOException {
+ return openHRegion(rootDir, info, htd, wal, conf, rsServices, reporter, null);
+ }
+
+ /**
+ * Open a Region.
+ * @param rootDir Root directory for HBase instance
+ * @param info Info for region to be opened.
+ * @param htd the table descriptor
+ * @param wal WAL for region to use. This method will call
+ * WAL#setSequenceNumber(long) passing the result of the call to
+ * HRegion#getMinSequenceId() to ensure the wal id is properly kept
+ * up. HRegionStore does this every time it opens a new region.
+ * @param conf The Configuration object to use.
+ * @param rsServices An interface we can request flushes against.
+ * @param reporter An interface we can report progress against.
+ * @param keyManagementService reference to {@link KeyManagementService} or null
+ * @return new HRegion
+ */
+ public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
+ final TableDescriptor htd, final WAL wal, final Configuration conf,
+ final RegionServerServices rsServices, final CancelableProgressable reporter,
+ final KeyManagementService keyManagementService) throws IOException {
FileSystem fs = null;
if (rsServices != null) {
fs = rsServices.getFileSystem();
@@ -7756,7 +7910,8 @@ public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
if (fs == null) {
fs = rootDir.getFileSystem(conf);
}
- return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter);
+ return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter,
+ keyManagementService);
}
/**
@@ -7771,57 +7926,70 @@ public static HRegion openHRegion(final Path rootDir, final RegionInfo info,
* properly kept up. HRegionStore does this every time it opens a new region.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal)
throws IOException {
- return openHRegion(conf, fs, rootDir, info, htd, wal, null, null);
+ return openHRegion(conf, fs, rootDir, info, htd, wal, null, null, null);
+ }
+
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+ public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
+ final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal,
+ final RegionServerServices rsServices, final CancelableProgressable reporter)
+ throws IOException {
+ return openHRegion(conf, fs, rootDir, info, htd, wal, rsServices, reporter, null);
}
/**
* Open a Region.
- * @param conf The Configuration object to use.
- * @param fs Filesystem to use
- * @param rootDir Root directory for HBase instance
- * @param info Info for region to be opened.
- * @param htd the table descriptor
- * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long)
- * passing the result of the call to HRegion#getMinSequenceId() to ensure the
- * wal id is properly kept up. HRegionStore does this every time it opens a new
- * region.
- * @param rsServices An interface we can request flushes against.
- * @param reporter An interface we can report progress against.
+ * @param conf The Configuration object to use.
+ * @param fs Filesystem to use
+ * @param rootDir Root directory for HBase instance
+ * @param info Info for region to be opened.
+ * @param htd the table descriptor
+ * @param wal WAL for region to use. This method will call
+ * WAL#setSequenceNumber(long) passing the result of the call to
+ * HRegion#getMinSequenceId() to ensure the wal id is properly kept
+ * up. HRegionStore does this every time it opens a new region.
+ * @param rsServices An interface we can request flushes against.
+ * @param reporter An interface we can report progress against.
+ * @param keyManagementService reference to {@link KeyManagementService} or null
* @return new HRegion
*/
public static HRegion openHRegion(final Configuration conf, final FileSystem fs,
final Path rootDir, final RegionInfo info, final TableDescriptor htd, final WAL wal,
- final RegionServerServices rsServices, final CancelableProgressable reporter)
- throws IOException {
+ final RegionServerServices rsServices, final CancelableProgressable reporter,
+ final KeyManagementService keyManagementService) throws IOException {
Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable());
- return openHRegionFromTableDir(conf, fs, tableDir, info, htd, wal, rsServices, reporter);
+ return openHRegionFromTableDir(conf, fs, tableDir, info, htd, wal, rsServices, reporter,
+ keyManagementService);
}
/**
* Open a Region.
- * @param conf The Configuration object to use.
- * @param fs Filesystem to use
- * @param info Info for region to be opened.
- * @param htd the table descriptor
- * @param wal WAL for region to use. This method will call WAL#setSequenceNumber(long)
- * passing the result of the call to HRegion#getMinSequenceId() to ensure the
- * wal id is properly kept up. HRegionStore does this every time it opens a new
- * region.
- * @param rsServices An interface we can request flushes against.
- * @param reporter An interface we can report progress against.
+ * @param conf The Configuration object to use.
+ * @param fs Filesystem to use
+ * @param info Info for region to be opened.
+ * @param htd the table descriptor
+ * @param wal WAL for region to use. This method will call
+ * WAL#setSequenceNumber(long) passing the result of the call to
+ * HRegion#getMinSequenceId() to ensure the wal id is properly kept
+ * up. HRegionStore does this every time it opens a new region.
+ * @param rsServices An interface we can request flushes against.
+ * @param reporter An interface we can report progress against.
+ * @param keyManagementService reference to {@link KeyManagementService} or null
* @return new HRegion
* @throws NullPointerException if {@code info} is {@code null}
*/
public static HRegion openHRegionFromTableDir(final Configuration conf, final FileSystem fs,
final Path tableDir, final RegionInfo info, final TableDescriptor htd, final WAL wal,
- final RegionServerServices rsServices, final CancelableProgressable reporter)
- throws IOException {
+ final RegionServerServices rsServices, final CancelableProgressable reporter,
+ final KeyManagementService keyManagementService) throws IOException {
Objects.requireNonNull(info, "RegionInfo cannot be null");
LOG.debug("Opening region: {}", info);
- HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices);
+ HRegion r =
+ HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices, keyManagementService);
return r.openHRegion(reporter);
}
@@ -7835,19 +8003,15 @@ public NavigableMap<byte[], Integer> getReplicationScope() {
* @param reporter An interface we can report progress against.
* @return new HRegion
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter)
throws IOException {
HRegionFileSystem regionFs = other.getRegionFileSystem();
HRegion r = newHRegion(regionFs.getTableDir(), other.getWAL(), regionFs.getFileSystem(),
- other.baseConf, other.getRegionInfo(), other.getTableDescriptor(), null);
+ other.baseConf, other.getRegionInfo(), other.getTableDescriptor(), null, null);
return r.openHRegion(reporter);
}
- public static Region openHRegion(final Region other, final CancelableProgressable reporter)
- throws IOException {
- return openHRegion((HRegion) other, reporter);
- }
-
/**
* Open HRegion.
*
@@ -7913,7 +8077,7 @@ public static HRegion openReadOnlyFileSystemHRegion(final Configuration conf, fi
if (info.getReplicaId() <= 0) {
info = RegionReplicaUtil.getRegionInfoForReplica(info, 1);
}
- HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null);
+ HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null, null);
r.writestate.setReadOnly(true);
return r.openHRegion(null);
}
@@ -7933,7 +8097,7 @@ public static HRegion warmupHRegion(final RegionInfo info, final TableDescriptor
if (fs == null) {
fs = rootDir.getFileSystem(conf);
}
- HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null);
+ HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null, null);
r.initializeWarmup(reporter);
r.close();
return r;
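
For illustration, a sketch (not part of the patch) of how an offline caller that has no RegionServerServices could thread a key management service into the new openHRegion overload; rootDir, info, htd and wal are assumed to be supplied by the caller:

  static HRegion openWithKeyManagement(Path rootDir, RegionInfo info, TableDescriptor htd,
      WAL wal, Configuration conf) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    // Build a standalone service only when the feature is on; passing null keeps
    // the legacy, non-managed read/write behavior.
    KeyManagementService kms = SecurityUtil.isKeyManagementEnabled(conf)
      ? KeyManagementService.createDefault(conf, fs)
      : null;
    return HRegion.openHRegion(rootDir, info, htd, wal, conf, null, null, kms);
  }
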
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 98299c47302c..995f7fa6c47f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -81,6 +81,7 @@
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.quotas.RegionSizeStore;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -93,7 +94,7 @@
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
@@ -336,7 +337,9 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException {
return new StoreContext.Builder().withBlockSize(family.getBlocksize())
- .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family))
+ .withEncryptionContext(SecurityUtil.createEncryptionContext(conf, family,
+ region.getManagedKeyDataCache(), region.getSystemKeyCache(),
+ KeyNamespaceUtil.constructKeyNamespace(region.getTableDescriptor(), family)))
.withBloomType(family.getBloomFilterType()).withCacheConfig(createCacheConf(family))
.withCellComparator(region.getTableDescriptor().isMetaTable() || conf
.getBoolean(HRegion.USE_META_CELL_COMPARATOR, HRegion.DEFAULT_USE_META_CELL_COMPARATOR)
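
Per the TestKeyNamespaceUtil cases added below, the key namespace built here is the table name and family name joined by a slash. A hedged sketch of what this resolves to for a table named orders with family cf:

  TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("orders"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
  // Expected to yield "orders/cf"; the write path looks up the active key in this
  // namespace first and falls back to ManagedKeyData.KEY_SPACE_GLOBAL if none exists.
  String ns = KeyNamespaceUtil.constructKeyNamespace(td, ColumnFamilyDescriptorBuilder.of("cf"));
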
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
index a7df71f460e4..0fb5c2e5f940 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
@@ -43,7 +45,11 @@
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -213,9 +219,16 @@ public long getMaxMemStoreTS() {
*/
private final BloomType cfBloomType;
+ private String keyNamespace;
+
+ private SystemKeyCache systemKeyCache;
+
+ private final ManagedKeyDataCache managedKeyDataCache;
+
/**
* Constructor, loads a reader and it's indices, etc. May allocate a substantial amount of ram
- * depending on the underlying files (10-20MB?).
+   * depending on the underlying files (10-20MB?). Since this is only used in the read path, the
+   * key namespace is not needed.
* @param fs The current file system to use.
* @param p The path of the file.
* @param conf The current configuration.
@@ -229,7 +242,9 @@ public long getMaxMemStoreTS() {
*/
public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheConf,
BloomType cfBloomType, boolean primaryReplica, StoreFileTracker sft) throws IOException {
- this(sft.getStoreFileInfo(p, primaryReplica), cfBloomType, cacheConf);
+ this(sft.getStoreFileInfo(p, primaryReplica), cfBloomType, cacheConf, null, null,
+ SecurityUtil.isKeyManagementEnabled(conf) ? SystemKeyCache.createCache(conf, fs) : null,
+ SecurityUtil.isKeyManagementEnabled(conf) ? new ManagedKeyDataCache(conf, null) : null);
}
/**
@@ -243,8 +258,15 @@ public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheCo
* ignored.
* @param cacheConf The cache configuration and block cache reference.
*/
- public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf) {
- this(fileInfo, cfBloomType, cacheConf, null);
+ public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf)
+ throws IOException {
+ this(fileInfo, cfBloomType, cacheConf, null, KeyNamespaceUtil.constructKeyNamespace(fileInfo),
+ SecurityUtil.isKeyManagementEnabled(fileInfo.getConf())
+ ? SystemKeyCache.createCache(fileInfo.getConf(), fileInfo.getFileSystem())
+ : null,
+ SecurityUtil.isKeyManagementEnabled(fileInfo.getConf())
+ ? new ManagedKeyDataCache(fileInfo.getConf(), null)
+ : null);
}
/**
@@ -260,10 +282,14 @@ public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cac
* @param metrics Tracks bloom filter requests and results. May be null.
*/
public HStoreFile(StoreFileInfo fileInfo, BloomType cfBloomType, CacheConfig cacheConf,
- BloomFilterMetrics metrics) {
+ BloomFilterMetrics metrics, String keyNamespace, SystemKeyCache systemKeyCache,
+ ManagedKeyDataCache managedKeyDataCache) {
this.fileInfo = fileInfo;
this.cacheConf = cacheConf;
this.metrics = metrics;
+ this.keyNamespace = keyNamespace != null ? keyNamespace : KEY_SPACE_GLOBAL;
+ this.systemKeyCache = systemKeyCache;
+ this.managedKeyDataCache = managedKeyDataCache;
if (BloomFilterFactory.isGeneralBloomEnabled(fileInfo.getConf())) {
this.cfBloomType = cfBloomType;
} else {
@@ -392,7 +418,8 @@ public HDFSBlocksDistribution getHDFSBlockDistribution() {
private void open() throws IOException {
fileInfo.initHDFSBlocksDistribution();
long readahead = fileInfo.isNoReadahead() ? 0L : -1L;
- ReaderContext context = fileInfo.createReaderContext(false, readahead, ReaderType.PREAD);
+ ReaderContext context = fileInfo.createReaderContext(false, readahead, ReaderType.PREAD,
+ keyNamespace, systemKeyCache, managedKeyDataCache);
fileInfo.initHFileInfo(context);
StoreFileReader reader = fileInfo.preStoreFileReaderOpen(context, cacheConf);
if (reader == null) {
@@ -540,7 +567,8 @@ public void initReader() throws IOException {
private StoreFileReader createStreamReader(boolean canUseDropBehind) throws IOException {
initReader();
final boolean doDropBehind = canUseDropBehind && cacheConf.shouldDropBehindCompaction();
- ReaderContext context = fileInfo.createReaderContext(doDropBehind, -1, ReaderType.STREAM);
+ ReaderContext context = fileInfo.createReaderContext(doDropBehind, -1, ReaderType.STREAM,
+ keyNamespace, systemKeyCache, managedKeyDataCache);
StoreFileReader reader = fileInfo.preStoreFileReaderOpen(context, cacheConf);
if (reader == null) {
reader = fileInfo.createReader(context, cacheConf);
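
A minimal sketch (helper name is illustrative) of building a reader-side HStoreFile with the new cache arguments; when keyNamespace is null the constructor defaults it to KEY_SPACE_GLOBAL:

  static HStoreFile openForRead(StoreFileInfo fileInfo, CacheConfig cacheConf,
      Configuration conf, FileSystem fs) throws IOException {
    boolean managed = SecurityUtil.isKeyManagementEnabled(conf);
    // Both caches are optional: with key management off they stay null and the
    // reader context behaves exactly as before this change.
    SystemKeyCache systemKeys = managed ? SystemKeyCache.createCache(conf, fs) : null;
    ManagedKeyDataCache managedKeys = managed ? new ManagedKeyDataCache(conf, null) : null;
    return new HStoreFile(fileInfo, BloomType.NONE, cacheConf, null,
      KeyNamespaceUtil.constructKeyNamespace(fileInfo), systemKeys, managedKeys);
  }
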
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index a46e2dae695c..db5cec9f3228 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
@@ -54,7 +55,8 @@
* judicious adding API. Changes cause ripples through the code base.
*/
@InterfaceAudience.Private
-public interface RegionServerServices extends Server, MutableOnlineRegions, FavoredNodesForRegion {
+public interface RegionServerServices
+ extends Server, MutableOnlineRegions, FavoredNodesForRegion, KeyManagementService {
/** Returns the WAL for a particular region. Pass null for getting the default (common) WAL */
WAL getWAL(RegionInfo regionInfo) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
index 1c837d216f38..998332637373 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
@@ -25,6 +25,8 @@
import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorConfig;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.yetus.audience.InterfaceAudience;
@@ -117,4 +119,12 @@ public int getNumStores() {
long getMemStoreSize() {
return region.getMemStoreDataSize();
}
+
+ public ManagedKeyDataCache getManagedKeyDataCache() {
+ return rsServices.getManagedKeyDataCache();
+ }
+
+ public SystemKeyCache getSystemKeyCache() {
+ return rsServices.getSystemKeyCache();
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
index 30cf5e2a92fa..08e710826358 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreEngine.java
@@ -41,6 +41,9 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.io.hfile.BloomFilterMetrics;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionPolicy;
@@ -116,6 +119,10 @@ public abstract class StoreEngine
     storeFiles) throws IOException;
+
+ /**
+   * Get the store context.
+ * @return the store context.
+ */
+ StoreContext getStoreContext();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
index 779a114af594..87eca7b93c9c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
@@ -375,6 +375,11 @@ public String createFromHFileLink(final String hfileLinkName, final boolean crea
createBackRef);
}
+ @Override
+ public StoreContext getStoreContext() {
+ return ctx;
+ }
+
  public void removeStoreFiles(List<StoreFileInfo> storeFiles) throws IOException {
archiveStoreFiles(storeFiles);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index 9bbaf8cd72d2..1ca3f68ee997 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -41,9 +41,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
-import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
-import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.master.replication.OfflineTableReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationGroupOffset;
@@ -369,21 +367,6 @@ public ChoreService getChoreService() {
return null;
}
- @Override
- public SystemKeyCache getSystemKeyCache() {
- return null;
- }
-
- @Override
- public ManagedKeyDataCache getManagedKeyDataCache() {
- return null;
- }
-
- @Override
- public KeymetaAdmin getKeymetaAdmin() {
- return null;
- }
-
@Override
public FileSystem getFileSystem() {
return null;
@@ -403,5 +386,10 @@ public Connection createConnection(Configuration conf) throws IOException {
public AsyncClusterConnection getAsyncClusterConnection() {
return null;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
index 92b5f340a610..2e6e4cb4f933 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
@@ -17,8 +17,19 @@
*/
package org.apache.hadoop.hbase.security;
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.io.crypto.Cipher;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@@ -28,7 +39,6 @@
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class SecurityUtil {
-
/**
* Get the user name from a principal
*/
@@ -48,6 +58,183 @@ public static String getPrincipalWithoutRealm(final String principal) {
return (i > -1) ? principal.substring(0, i) : principal;
}
+ /**
+   * Helper to create an encryption context with the current encryption key, suitable for writes.
+ * @param conf The current configuration.
+ * @param family The current column descriptor.
+ * @param managedKeyDataCache The managed key data cache.
+ * @param systemKeyCache The system key cache.
+ * @param keyNamespace The key namespace.
+ * @return The created encryption context.
+ * @throws IOException if an encryption key for the column cannot be unwrapped
+ * @throws IllegalStateException in case of encryption related configuration errors
+ */
+ public static Encryption.Context createEncryptionContext(Configuration conf,
+ ColumnFamilyDescriptor family, ManagedKeyDataCache managedKeyDataCache,
+ SystemKeyCache systemKeyCache, String keyNamespace) throws IOException {
+ Encryption.Context cryptoContext = Encryption.Context.NONE;
+ String cipherName = family.getEncryptionType();
+ if (cipherName != null) {
+ if (!Encryption.isEncryptionEnabled(conf)) {
+ throw new IllegalStateException("Encryption for family '" + family.getNameAsString()
+ + "' configured with type '" + cipherName + "' but the encryption feature is disabled");
+ }
+ Cipher cipher = null;
+ Key key = null;
+ ManagedKeyData kekKeyData = null;
+ if (isKeyManagementEnabled(conf)) {
+ kekKeyData = managedKeyDataCache.getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES,
+ keyNamespace);
+ // If no active key found in the specific namespace, try the global namespace
+ if (kekKeyData == null) {
+ kekKeyData = managedKeyDataCache.getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES,
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ keyNamespace = ManagedKeyData.KEY_SPACE_GLOBAL;
+ }
+ if (kekKeyData == null) {
+ throw new IOException(
+ "No active key found for custodian: " + ManagedKeyData.KEY_GLOBAL_CUSTODIAN
+ + " in namespaces: " + keyNamespace + " and " + ManagedKeyData.KEY_SPACE_GLOBAL);
+ }
+ if (
+ conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_DEFAULT_ENABLED)
+ ) {
+ cipher =
+ getCipherIfValid(conf, cipherName, kekKeyData.getTheKey(), family.getNameAsString());
+ } else {
+ key = kekKeyData.getTheKey();
+ kekKeyData = systemKeyCache.getLatestSystemKey();
+ }
+ } else {
+ byte[] keyBytes = family.getEncryptionKey();
+ if (keyBytes != null) {
+ // Family provides specific key material
+ key = EncryptionUtil.unwrapKey(conf, keyBytes);
+ } else {
+ cipher = getCipherIfValid(conf, cipherName, null, null);
+ }
+ }
+ if (key != null || cipher != null) {
+ if (key == null) {
+ // Family does not provide key material, create a random key
+ key = cipher.getRandomKey();
+ }
+ if (cipher == null) {
+ cipher = getCipherIfValid(conf, cipherName, key, family.getNameAsString());
+ }
+ cryptoContext = Encryption.newContext(conf);
+ cryptoContext.setCipher(cipher);
+ cryptoContext.setKey(key);
+ cryptoContext.setKeyNamespace(keyNamespace);
+ cryptoContext.setKEKData(kekKeyData);
+ }
+ }
+ return cryptoContext;
+ }
+
+ /**
+ * Create an encryption context from encryption key found in a file trailer, suitable for read.
+ * @param conf The current configuration.
+ * @param path The path of the file.
+ * @param trailer The file trailer.
+ * @param managedKeyDataCache The managed key data cache.
+ * @param systemKeyCache The system key cache.
+ * @return The created encryption context or null if no key material is available.
+ * @throws IOException if an encryption key for the file cannot be unwrapped
+ */
+ public static Encryption.Context createEncryptionContext(Configuration conf, Path path,
+ FixedFileTrailer trailer, ManagedKeyDataCache managedKeyDataCache,
+ SystemKeyCache systemKeyCache) throws IOException {
+ ManagedKeyData kekKeyData = null;
+ byte[] keyBytes = trailer.getEncryptionKey();
+ Encryption.Context cryptoContext = Encryption.Context.NONE;
+ // Check for any key material available
+ if (keyBytes != null) {
+ cryptoContext = Encryption.newContext(conf);
+ Key kek = null;
+      // When the KEK metadata is available, we will try to unwrap the encrypted key using the KEK,
+ // otherwise we will use the system keys starting from the latest to the oldest.
+ if (trailer.getKEKMetadata() != null) {
+ if (managedKeyDataCache == null) {
+ throw new IOException("Key management is enabled, but ManagedKeyDataCache is null");
+ }
+ Throwable cause = null;
+ try {
+ kekKeyData = managedKeyDataCache.getEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES,
+ trailer.getKeyNamespace(), trailer.getKEKMetadata(), keyBytes);
+ } catch (KeyException | IOException e) {
+ cause = e;
+ }
+      // When getEntry returns null, we treat it the same as the exception case.
+ if (kekKeyData == null) {
+ throw new IOException(
+ "Failed to get key data for KEK metadata: " + trailer.getKEKMetadata(), cause);
+ }
+ kek = kekKeyData.getTheKey();
+ } else {
+ if (SecurityUtil.isKeyManagementEnabled(conf)) {
+ if (systemKeyCache == null) {
+ throw new IOException("Key management is enabled, but SystemKeyCache is null");
+ }
+ ManagedKeyData systemKeyData =
+ systemKeyCache.getSystemKeyByChecksum(trailer.getKEKChecksum());
+ if (systemKeyData == null) {
+ throw new IOException(
+ "Failed to get system key by checksum: " + trailer.getKEKChecksum());
+ }
+ kek = systemKeyData.getTheKey();
+ kekKeyData = systemKeyData;
+ }
+ }
+ Key key;
+ if (kek != null) {
+ try {
+ key = EncryptionUtil.unwrapKey(conf, null, keyBytes, kek);
+ } catch (KeyException | IOException e) {
+ throw new IOException("Failed to unwrap key with KEK checksum: "
+ + trailer.getKEKChecksum() + ", metadata: " + trailer.getKEKMetadata(), e);
+ }
+ } else {
+ key = EncryptionUtil.unwrapKey(conf, keyBytes);
+ }
+ // Use the algorithm the key wants
+ Cipher cipher = getCipherIfValid(conf, key.getAlgorithm(), key, null);
+ cryptoContext.setCipher(cipher);
+ cryptoContext.setKey(key);
+ cryptoContext.setKEKData(kekKeyData);
+ }
+ return cryptoContext;
+ }
+
+ /**
+ * Get the cipher if the cipher name is valid, otherwise throw an exception.
+ * @param conf the configuration
+ * @param cipherName the cipher name to check
+ * @param key the key to check
+ * @param familyName the family name
+ * @return the cipher if the cipher name is valid
+ * @throws IllegalStateException if the cipher name is not valid
+ */
+ private static Cipher getCipherIfValid(Configuration conf, String cipherName, Key key,
+ String familyName) {
+    // Fail if misconfigured. We use the encryption type specified in the column
+    // schema as a sanity check on what the wrapped key is telling us.
+ if (key != null && !key.getAlgorithm().equalsIgnoreCase(cipherName)) {
+ throw new IllegalStateException(
+ "Encryption for family '" + familyName + "' configured with type '" + cipherName
+ + "' but key specifies algorithm '" + key.getAlgorithm() + "'");
+ }
+ // Use the algorithm the key wants
+ Cipher cipher = Encryption.getCipher(conf, cipherName);
+ if (cipher == null) {
+ throw new IllegalStateException("Cipher '" + cipherName + "' is not available");
+ }
+ return cipher;
+ }
+
/**
* From the given configuration, determine if key management is enabled.
* @param conf the configuration to check
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 564c46ad5bf6..db7a9422b75e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -90,20 +91,27 @@ public static RegionInfo[] createRegionInfos(TableDescriptor tableDescriptor,
* @param newRegions {@link RegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
*/
-  public static List<RegionInfo> createRegions(final Configuration conf, final Path rootDir,
+  public static List<RegionInfo> createRegions(final MasterProcedureEnv env, final Path rootDir,
final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task)
throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
- ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf,
+ ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(env.getMasterConfiguration(),
"RegionOpenAndInit-" + tableDescriptor.getTableName(), regionNumber);
try {
- return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task);
+ return createRegions(exec, env.getMasterConfiguration(), env, rootDir, tableDescriptor,
+ newRegions, task);
} finally {
exec.shutdownNow();
}
}
+  public static List<RegionInfo> createRegions(final ThreadPoolExecutor exec,
+ final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor,
+ final RegionInfo[] newRegions, final RegionFillTask task) throws IOException {
+ return createRegions(exec, conf, null, rootDir, tableDescriptor, newRegions, task);
+ }
+
/**
* Create new set of regions on the specified file-system. NOTE: that you should add the regions
* to hbase:meta after this operation.
@@ -115,8 +123,9 @@ public static List<RegionInfo> createRegions(final Configuration conf, final Pat
* @param task {@link RegionFillTask} custom code to populate region after creation
*/
  public static List<RegionInfo> createRegions(final ThreadPoolExecutor exec,
- final Configuration conf, final Path rootDir, final TableDescriptor tableDescriptor,
- final RegionInfo[] newRegions, final RegionFillTask task) throws IOException {
+ final Configuration conf, final MasterProcedureEnv env, final Path rootDir,
+ final TableDescriptor tableDescriptor, final RegionInfo[] newRegions, final RegionFillTask task)
+ throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
    CompletionService<RegionInfo> completionService = new ExecutorCompletionService<>(exec);
@@ -125,7 +134,7 @@ public static List<RegionInfo> createRegions(final ThreadPoolExecutor exec,
      completionService.submit(new Callable<RegionInfo>() {
@Override
public RegionInfo call() throws IOException {
- return createRegion(conf, rootDir, tableDescriptor, newRegion, task);
+ return createRegion(conf, env, rootDir, tableDescriptor, newRegion, task);
}
});
}
@@ -151,15 +160,16 @@ public RegionInfo call() throws IOException {
* @param newRegion {@link RegionInfo} that describes the region to create
* @param task {@link RegionFillTask} custom code to populate region after creation
*/
- public static RegionInfo createRegion(final Configuration conf, final Path rootDir,
- final TableDescriptor tableDescriptor, final RegionInfo newRegion, final RegionFillTask task)
- throws IOException {
+ public static RegionInfo createRegion(final Configuration conf, final MasterProcedureEnv env,
+ final Path rootDir, final TableDescriptor tableDescriptor, final RegionInfo newRegion,
+ final RegionFillTask task) throws IOException {
// 1. Create HRegion
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
Configuration confForWAL = new Configuration(conf);
confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
- HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false);
+ HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false,
+ null, env == null ? null : env.getMasterServices());
try {
// 2. Custom user code to interact with the created region
if (task != null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
index caccf816c8a3..f3b2e2ca1ade 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
@@ -19,6 +19,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
import org.junit.After;
import org.junit.Before;
@@ -29,7 +30,7 @@ public class ManagedKeyTestBase {
@Before
public void setUp() throws Exception {
TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
- MockManagedKeyProvider.class.getName());
+ getKeyProviderClass().getName());
TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes",
KeymetaServiceEndpoint.class.getName());
@@ -44,4 +45,8 @@ public void setUp() throws Exception {
public void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
+
+ protected Class extends ManagedKeyProvider> getKeyProviderClass() {
+ return MockManagedKeyProvider.class;
+ }
}
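
With the new hook, a suite can swap in a different provider by subclassing; a minimal sketch using a hypothetical AlternateManagedKeyProvider:

  public class TestWithAlternateProvider extends ManagedKeyTestBase {
    @Override
    protected Class<? extends ManagedKeyProvider> getKeyProviderClass() {
      // AlternateManagedKeyProvider is illustrative only, not part of this patch.
      return AlternateManagedKeyProvider.class;
    }
  }
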
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
index d476e0619ca4..8ae91de6588f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.keymeta;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
@@ -25,7 +26,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
@@ -45,22 +46,36 @@ public void testGetKeyProviderWithInvalidProvider() throws Exception {
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
"org.apache.hadoop.hbase.keymeta.DummyKeyProvider");
- Server mockServer = mock(Server.class);
+ MasterServices mockServer = mock(MasterServices.class);
when(mockServer.getConfiguration()).thenReturn(conf);
- KeyManagementBase keyMgmt = new TestKeyManagement(mockServer);
+ final KeyManagementBase keyMgmt = new TestKeyManagement(mockServer);
+ assertEquals(mockServer, keyMgmt.getKeyManagementService());
// Should throw RuntimeException when provider is not ManagedKeyProvider
RuntimeException exception = assertThrows(RuntimeException.class, () -> {
keyMgmt.getKeyProvider();
});
-
assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider"));
+ exception = assertThrows(RuntimeException.class, () -> {
+ KeyManagementBase keyMgmt2 = new TestKeyManagement(conf);
+ keyMgmt2.getKeyProvider();
+ });
+ assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider"));
+
+ assertThrows(IllegalArgumentException.class, () -> {
+ Configuration configuration = null;
+ new TestKeyManagement(configuration);
+ });
}
private static class TestKeyManagement extends KeyManagementBase {
- public TestKeyManagement(Server server) {
+ public TestKeyManagement(MasterServices server) {
super(server);
}
+
+ public TestKeyManagement(Configuration configuration) {
+ super(configuration);
+ }
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java
new file mode 100644
index 000000000000..3fe669f90d80
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+import static org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils.SeekableByteArrayInputStream;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestKeyManagementService {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeyManagementService.class);
+
+ @Rule
+ public TestName name = new TestName();
+
+ protected Configuration conf = new Configuration();
+ protected FileSystem mockFileSystem = mock(FileSystem.class);
+
+ @Before
+ public void setUp() throws Exception {
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+ conf.set(HConstants.HBASE_ORIGINAL_DIR, "/tmp/hbase");
+ }
+
+ @Test
+ public void testDefaultKeyManagementServiceCreation() throws IOException {
+ // SystemKeyCache needs at least one valid key to be created, so setting up a mock FS that
+ // returns a mock file that returns a known mocked key metadata.
+ MockManagedKeyProvider provider = (MockManagedKeyProvider) Encryption.getKeyProvider(conf);
+ ManagedKeyData keyData =
+ provider.getManagedKey("system".getBytes(), ManagedKeyData.KEY_SPACE_GLOBAL);
+ String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
+ Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
+ FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName);
+
+ // Create a real FSDataInputStream that contains the key metadata in UTF format
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ DataOutputStream dos = new DataOutputStream(baos);
+ dos.writeUTF(keyData.getKeyMetadata());
+ dos.close();
+
+ SeekableByteArrayInputStream seekableStream =
+ new SeekableByteArrayInputStream(baos.toByteArray());
+ FSDataInputStream realStream = new FSDataInputStream(seekableStream);
+
+ when(mockFileSystem.open(eq(mockFileStatus.getPath()))).thenReturn(realStream);
+ when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))))
+ .thenReturn(new FileStatus[] { mockFileStatus });
+
+ KeyManagementService service = KeyManagementService.createDefault(conf, mockFileSystem);
+ assertNotNull(service);
+ assertNotNull(service.getSystemKeyCache());
+ assertNotNull(service.getManagedKeyDataCache());
+ assertThrows(UnsupportedOperationException.class, () -> service.getKeymetaAdmin());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java
new file mode 100644
index 000000000000..1012d2b5a08f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyNamespaceUtil.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.StoreContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MiscTests.class, SmallTests.class })
+public class TestKeyNamespaceUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeyNamespaceUtil.class);
+
+ @Test
+ public void testConstructKeyNamespace_FromTableDescriptorAndFamilyDescriptor() {
+ TableDescriptor tableDescriptor = mock(TableDescriptor.class);
+ ColumnFamilyDescriptor familyDescriptor = mock(ColumnFamilyDescriptor.class);
+ when(tableDescriptor.getTableName()).thenReturn(TableName.valueOf("test"));
+ when(familyDescriptor.getNameAsString()).thenReturn("family");
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(tableDescriptor, familyDescriptor);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromStoreContext() {
+ // Test namespace construction from a StoreContext
+ TableName tableName = TableName.valueOf("test");
+ RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
+ HRegionFileSystem regionFileSystem = mock(HRegionFileSystem.class);
+ when(regionFileSystem.getRegionInfo()).thenReturn(regionInfo);
+
+ ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of("family");
+
+ StoreContext storeContext = StoreContext.getBuilder().withRegionFileSystem(regionFileSystem)
+ .withColumnFamilyDescriptor(familyDescriptor).build();
+
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeContext);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromStoreFileInfo_RegularFile() {
+ // Test a regular (non-linked) store file
+ StoreFileInfo storeFileInfo = mock(StoreFileInfo.class);
+ when(storeFileInfo.isLink()).thenReturn(false);
+ Path path = KeymetaTestUtils.createMockPath("test", "family");
+ when(storeFileInfo.getPath()).thenReturn(path);
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeFileInfo);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromStoreFileInfo_LinkedFile() {
+ // Test a linked store file; the namespace is derived from the HFileLink's origin path
+ StoreFileInfo storeFileInfo = mock(StoreFileInfo.class);
+ HFileLink link = mock(HFileLink.class);
+ when(storeFileInfo.isLink()).thenReturn(true);
+ Path path = KeymetaTestUtils.createMockPath("test", "family");
+ when(link.getOriginPath()).thenReturn(path);
+ when(storeFileInfo.getLink()).thenReturn(link);
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(storeFileInfo);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromPath() {
+ // Test namespace construction by parsing an HFile path in the standard HBase layout
+ Path path = KeymetaTestUtils.createMockPath("test", "family");
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(path);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_FromStrings() {
+ // Test string-based construction
+ String tableName = "test";
+ String family = "family";
+ String keyNamespace = KeyNamespaceUtil.constructKeyNamespace(tableName, family);
+ assertEquals("test/family", keyNamespace);
+ }
+
+ @Test
+ public void testConstructKeyNamespace_NullChecks() {
+ // Test null inputs for both table name and family
+ assertThrows(NullPointerException.class,
+ () -> KeyNamespaceUtil.constructKeyNamespace(null, "family"));
+ assertThrows(NullPointerException.class,
+ () -> KeyNamespaceUtil.constructKeyNamespace("test", null));
+ }
+}
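The tests above pin down only two properties of KeyNamespaceUtil: the namespace is the table name and family joined with '/', and null inputs fail fast with NullPointerException. A hypothetical stand-in that satisfies exactly those assertions (the real implementation in this patch may differ):

import java.util.Objects;

public final class KeyNamespaceSketch {
  private KeyNamespaceSketch() {
  }

  static String constructKeyNamespace(String tableName, String family) {
    // Mirrors the NPE behavior asserted in testConstructKeyNamespace_NullChecks
    Objects.requireNonNull(tableName, "tableName");
    Objects.requireNonNull(family, "family");
    return tableName + "/" + family; // e.g. "test/family"
  }

  public static void main(String[] args) {
    System.out.println(constructKeyNamespace("test", "family")); // test/family
  }
}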
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
index bc8e14fe4b3d..7c884bdd27e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
@@ -148,9 +148,7 @@ public void testConvertToKeyCustBytesInvalid() {
public void testGetResponseBuilder() {
// Arrange
String keyCust = Base64.getEncoder().encodeToString("testKey".getBytes());
- String keyNamespace = "testNamespace";
- ManagedKeysRequest request =
- requestBuilder.setKeyCust(keyCust).setKeyNamespace(keyNamespace).build();
+ ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust).build();
// Act
ManagedKeysResponse.Builder result =
@@ -158,7 +156,6 @@ public void testGetResponseBuilder() {
// Assert
assertNotNull(result);
- assertEquals(keyNamespace, result.getKeyNamespace());
assertArrayEquals("testKey".getBytes(), result.getKeyCustBytes().toByteArray());
verify(controller, never()).setFailed(anyString());
}
@@ -167,9 +164,7 @@ public void testGetResponseBuilder() {
public void testGetResponseBuilderWithInvalidBase64() {
// Arrange
String keyCust = "invalidBase64!";
- String keyNamespace = "testNamespace";
- ManagedKeysRequest request =
- requestBuilder.setKeyCust(keyCust).setKeyNamespace(keyNamespace).build();
+ ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust).build();
// Act
ManagedKeysResponse.Builder result =
@@ -177,7 +172,6 @@ public void testGetResponseBuilderWithInvalidBase64() {
// Assert
assertNotNull(result);
- assertEquals(keyNamespace, result.getKeyNamespace());
assertEquals(KEY_FAILED, result.getKeyState());
verify(controller).setFailed(contains("Failed to decode specified prefix as Base64 string"));
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
index 12b74e1c3bcc..b695dedcdf98 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
@@ -57,7 +57,6 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@@ -69,6 +68,7 @@
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -99,7 +99,7 @@ public class TestKeymetaTableAccessor {
protected static String KEY_METADATA = "metadata1";
@Mock
- protected Server server;
+ protected MasterServices server;
@Mock
protected Connection connection;
@Mock
@@ -127,6 +127,7 @@ public void setUp() throws Exception {
when(connection.getTable(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(table);
when(server.getSystemKeyCache()).thenReturn(systemKeyCache);
when(server.getConfiguration()).thenReturn(conf);
+ when(server.getKeyManagementService()).thenReturn(server);
accessor = new KeymetaTableAccessor(server);
managedKeyProvider = new MockManagedKeyProvider();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
index 61678e316ceb..807586a9a476 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
@@ -425,7 +425,7 @@ public void testActiveKeysCacheRetrievalFromL2Cache() throws Exception {
@Test
public void testGenericCacheWithKeymetaAccessorException() throws Exception {
when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata"))
- .thenThrow(new IOException("Test exception"));
+ .thenThrow(new IOException("Test exception"));
assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null));
verify(mockL2).getKey(any(), any(String.class), any(String.class));
clearInvocations(mockL2);
@@ -436,7 +436,7 @@ public void testGenericCacheWithKeymetaAccessorException() throws Exception {
@Test
public void testGetActiveEntryWithKeymetaAccessorException() throws Exception {
when(mockL2.getActiveKey(CUST_ID, KEY_SPACE_GLOBAL))
- .thenThrow(new IOException("Test exception"));
+ .thenThrow(new IOException("Test exception"));
assertNull(cache.getActiveEntry(CUST_ID, KEY_SPACE_GLOBAL));
verify(mockL2).getActiveKey(any(), any(String.class));
clearInvocations(mockL2);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
index 9882b823da8c..f541d4bac18c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java
@@ -47,7 +47,8 @@
import org.mockito.MockitoAnnotations;
/**
- * Tests for SystemKeyCache class
+ * Tests for SystemKeyCache class. NOTE: The createCache() method is tested in
+ * TestKeyManagementService.
*/
@Category({ MasterTests.class, SmallTests.class })
public class TestSystemKeyCache {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java
index f5c259927475..9cf69775a30e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MasterStateStoreTestBase.java
@@ -26,7 +26,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseZKTestingUtil;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -72,7 +71,7 @@ public static void setUpBeforeClass() throws Exception {
CHORE_SERVICE = new ChoreService("TestMasterStateStore");
HFILE_CLEANER_POOL = DirScanPool.getHFileCleanerScanPool(conf);
LOG_CLEANER_POOL = DirScanPool.getLogCleanerScanPool(conf);
- Server server = mock(Server.class);
+ MasterServices server = mock(MasterServices.class);
when(server.getConfiguration()).thenReturn(conf);
when(server.getServerName())
.thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 6af4dcec1ad2..5b522dc91072 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
@@ -587,8 +588,12 @@ public long flushTable(TableName tableName, List<byte[]> columnFamilies, long no
return 0;
}
- @Override
public long rollAllWALWriters(long nonceGroup, long nonce) throws IOException {
return 0;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return this;
+ }
}
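A recurring wiring pattern in these test changes, shown in isolation: the no-op master returns this from getKeyManagementService(), which suggests MasterServices itself acts as the KeyManagementService. A minimal Mockito sketch under that assumption (illustrative only, not part of the patch):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.master.MasterServices;

class KeyManagementServiceWiringSketch {
  static MasterServices mockMasterActingAsKeyService() {
    MasterServices server = mock(MasterServices.class);
    // Mirrors MockNoopMasterServices#getKeyManagementService() returning `this`:
    // the same object serves both roles, so tests stub it once and reuse it.
    when(server.getKeyManagementService()).thenReturn(server);
    return server;
  }
}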
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index ba3387b8ad1d..81977c24b290 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
@@ -775,4 +776,9 @@ public ReplicateWALEntryResponse replicateToReplica(RpcController controller,
ReplicateWALEntryRequest request) throws ServiceException {
return null;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 02d76a9af3af..ac6d754a8396 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -33,9 +33,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
-import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
-import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskGroup;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -332,17 +330,7 @@ public ActiveMasterManager getActiveMasterManager() {
}
@Override
- public SystemKeyCache getSystemKeyCache() {
- return null;
- }
-
- @Override
- public ManagedKeyDataCache getManagedKeyDataCache() {
- return null;
- }
-
- @Override
- public KeymetaAdmin getKeymetaAdmin() {
+ public KeyManagementService getKeyManagementService() {
return null;
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
index 08ffef8e0e9f..6592238add50 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
@@ -43,7 +43,6 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
@@ -73,11 +72,12 @@
TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class, })
@Category({ MasterTests.class, SmallTests.class })
public class TestKeymetaAdminImpl {
- private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
private static final String CUST = "cust1";
private static final String ENCODED_CUST = ManagedKeyProvider.encodeToStr(CUST.getBytes());
+ private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
@Rule
public TestName name = new TestName();
@@ -86,7 +86,7 @@ public class TestKeymetaAdminImpl {
protected FileSystem fs;
protected FileSystem mockFileSystem = mock(FileSystem.class);
- protected Server mockServer = mock(Server.class);
+ protected MasterServices mockServer = mock(MasterServices.class);
protected KeymetaAdminImplForTest keymetaAdmin;
KeymetaTableAccessor keymetaAccessor = mock(KeymetaTableAccessor.class);
@@ -99,6 +99,7 @@ public void setUp() throws Exception {
conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+ when(mockServer.getKeyManagementService()).thenReturn(mockServer);
when(mockServer.getFileSystem()).thenReturn(mockFileSystem);
when(mockServer.getConfiguration()).thenReturn(conf);
keymetaAdmin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
@@ -221,7 +222,7 @@ public void test() throws Exception {
}
private class KeymetaAdminImplForTest extends KeymetaAdminImpl {
- public KeymetaAdminImplForTest(Server mockServer, KeymetaTableAccessor mockAccessor) {
+ public KeymetaAdminImplForTest(MasterServices mockServer, KeymetaTableAccessor mockAccessor) {
super(mockServer);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
index 1c4ad60a8da1..0dc765ba7291 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java
@@ -45,6 +45,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
@@ -105,14 +106,6 @@ public void setUp() throws Exception {
systemKeyManager = new SystemKeyManager(mockMaster);
}
- private static FileStatus createMockFile(String fileName) {
- Path mockPath = mock(Path.class);
- when(mockPath.getName()).thenReturn(fileName);
- FileStatus mockFileStatus = mock(FileStatus.class);
- when(mockFileStatus.getPath()).thenReturn(mockPath);
- return mockFileStatus;
- }
-
@RunWith(BlockJUnit4ClassRunner.class)
@Category({ MasterTests.class, SmallTests.class })
public static class TestAccessorWhenDisabled extends TestSystemKeyAccessorAndManager {
@@ -128,8 +121,8 @@ public void setUp() throws Exception {
@Test
public void test() throws Exception {
- assertNull(systemKeyManager.getAllSystemKeyFiles());
- assertNull(systemKeyManager.getLatestSystemKeyFile().getFirst());
+ assertThrows(IOException.class, () -> systemKeyManager.getAllSystemKeyFiles());
+ assertThrows(IOException.class, () -> systemKeyManager.getLatestSystemKeyFile().getFirst());
}
}
@@ -164,15 +157,15 @@ public static class TestAccessor extends TestSystemKeyAccessorAndManager {
public void testGetLatestWithNone() throws Exception {
when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]);
- RuntimeException ex = assertThrows(RuntimeException.class,
- () -> systemKeyManager.getLatestSystemKeyFile());
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> systemKeyManager.getLatestSystemKeyFile());
assertEquals("No cluster key initialized yet", ex.getMessage());
}
@Test
public void testGetWithSingle() throws Exception {
String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
- FileStatus mockFileStatus = createMockFile(fileName);
+ FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName);
Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))))
@@ -192,7 +185,8 @@ public void testGetWithSingle() throws Exception {
@Test
public void testGetWithMultiple() throws Exception {
FileStatus[] mockFileStatuses = IntStream.rangeClosed(1, 3)
- .mapToObj(i -> createMockFile(SYSTEM_KEY_FILE_PREFIX + i)).toArray(FileStatus[]::new);
+ .mapToObj(i -> KeymetaTestUtils.createMockFile(SYSTEM_KEY_FILE_PREFIX + i))
+ .toArray(FileStatus[]::new);
Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + "*"))))
@@ -208,7 +202,8 @@ public void testGetWithMultiple() throws Exception {
@Test
public void testExtractKeySequenceForInvalidFilename() throws Exception {
- assertEquals(-1, SystemKeyAccessor.extractKeySequence(createMockFile("abcd").getPath()));
+ assertEquals(-1,
+ SystemKeyAccessor.extractKeySequence(KeymetaTestUtils.createMockFile("abcd").getPath()));
}
}
@@ -235,7 +230,7 @@ public static Collection<Object[]> data() {
@Test
public void test() throws Exception {
- FileStatus mockFileStatus = createMockFile(fileName);
+ FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName);
IOException ex = assertThrows(IOException.class,
() -> SystemKeyAccessor.extractSystemKeySeqNum(mockFileStatus.getPath()));
@@ -345,7 +340,7 @@ public void testEnsureSystemKeyInitialized_RaceCondition() throws Exception {
when(mockFileSystem.create(any())).thenReturn(mockStream);
when(mockFileSystem.rename(any(), any())).thenReturn(false);
String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
- FileStatus mockFileStatus = createMockFile(fileName);
+ FileStatus mockFileStatus = KeymetaTestUtils.createMockFile(fileName);
when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0],
new FileStatus[] { mockFileStatus });
@@ -498,6 +493,16 @@ public void testExtractKeySequenceEdgeCases() throws Exception {
assertEquals(0, SystemKeyAccessor.extractKeySequence(validZero));
assertEquals(-1, SystemKeyAccessor.extractKeySequence(validNegative));
}
+
+ @Test
+ public void testCreateCacheFactoryMethod() {
+ // Test static factory method
+ }
+
+ @Test
+ public void testCreateCacheWithNoKeys() {
+ // Test behavior when no system keys are available
+ }
}
private static class MockSystemKeyManager extends SystemKeyManager {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index a5e3dd1a5b83..ab99c55e6255 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -38,9 +38,7 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
-import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
-import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
@@ -218,21 +216,6 @@ public Connection getConnection() {
}
}
- @Override
- public SystemKeyCache getSystemKeyCache() {
- return null;
- }
-
- @Override
- public ManagedKeyDataCache getManagedKeyDataCache() {
- return null;
- }
-
- @Override
- public KeymetaAdmin getKeymetaAdmin() {
- return null;
- }
-
@Override
public FileSystem getFileSystem() {
try {
@@ -241,5 +224,10 @@ public FileSystem getFileSystem() {
throw new UncheckedIOException(e);
}
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
index 0526fd3ba70c..9ea11f732310 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/MasterRegionTestBase.java
@@ -26,12 +26,12 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
@@ -53,7 +53,7 @@ public class MasterRegionTestBase {
protected DirScanPool logCleanerPool;
- protected Server server;
+ protected MasterServices server;
protected static byte[] CF1 = Bytes.toBytes("f1");
@@ -96,7 +96,7 @@ protected final void createMasterRegion() throws IOException {
choreService = new ChoreService(getClass().getSimpleName());
hfileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
- server = mock(Server.class);
+ server = mock(MasterServices.class);
when(server.getConfiguration()).thenReturn(conf);
when(server.getServerName())
.thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
index 8f11cc415058..80792d4b276d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/region/TestMasterRegionOnTwoFileSystems.java
@@ -40,7 +40,6 @@
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -49,6 +48,7 @@
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
@@ -119,7 +119,7 @@ public static void tearDown() throws IOException {
}
private MasterRegion createMasterRegion(ServerName serverName) throws IOException {
- Server server = mock(Server.class);
+ MasterServices server = mock(MasterServices.class);
when(server.getConfiguration()).thenReturn(HFILE_UTIL.getConfiguration());
when(server.getServerName()).thenReturn(serverName);
MasterRegionParams params = new MasterRegionParams();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
index 790435f6a47e..779ca4dac6c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
@@ -24,9 +24,7 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
-import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
-import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
-import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.master.MockNoopMasterServices;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStorePerformanceEvaluation;
@@ -34,47 +32,24 @@
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.MockServer;
import org.apache.hadoop.hbase.util.Pair;
public class RegionProcedureStorePerformanceEvaluation
extends ProcedureStorePerformanceEvaluation {
- private static final class DummyServer extends MockServer {
-
- private final Configuration conf;
+ private static final class DummyServer extends MockNoopMasterServices {
private final ServerName serverName =
ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime());
public DummyServer(Configuration conf) {
- this.conf = conf;
- }
-
- @Override
- public Configuration getConfiguration() {
- return conf;
+ super(conf);
}
@Override
public ServerName getServerName() {
return serverName;
}
-
- @Override
- public SystemKeyCache getSystemKeyCache() {
- return null;
- }
-
- @Override
- public ManagedKeyDataCache getManagedKeyDataCache() {
- return null;
- }
-
- @Override
- public KeymetaAdmin getKeymetaAdmin() {
- return null;
- }
}
private MasterRegion region;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java
index c05eb9a8ce3e..dac4cc1e0e73 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestBase.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
-import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter;
@@ -51,7 +51,7 @@ public void setUp() throws IOException {
conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
Path testDir = htu.getDataTestDir();
CommonFSUtils.setRootDir(htu.getConfiguration(), testDir);
- Server server = RegionProcedureStoreTestHelper.mockServer(conf);
+ MasterServices server = RegionProcedureStoreTestHelper.mockServer(conf);
region = MasterRegionFactory.create(server);
store = RegionProcedureStoreTestHelper.createStore(server, region, new LoadCounter());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
index 0607d9d3e924..cc90d6e22b61 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStoreTestHelper.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.procedure2.store.LeaseRecovery;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader;
@@ -36,8 +37,8 @@ final class RegionProcedureStoreTestHelper {
private RegionProcedureStoreTestHelper() {
}
- static Server mockServer(Configuration conf) {
- Server server = mock(Server.class);
+ static MasterServices mockServer(Configuration conf) {
+ MasterServices server = mock(MasterServices.class);
when(server.getConfiguration()).thenReturn(conf);
when(server.getServerName())
.thenReturn(ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime()));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
index 7a6fee5f314c..70b93487c12b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/TestRegionProcedureStoreMigration.java
@@ -35,9 +35,9 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.assignment.AssignProcedure;
import org.apache.hadoop.hbase.master.region.MasterRegion;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
@@ -66,7 +66,7 @@ public class TestRegionProcedureStoreMigration {
private HBaseCommonTestingUtil htu;
- private Server server;
+ private MasterServices server;
private MasterRegion region;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 22be21811950..9b6a5d80c9ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -46,9 +46,7 @@
import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
-import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
-import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
-import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext;
import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -840,21 +838,6 @@ public ChoreService getChoreService() {
return null;
}
- @Override
- public SystemKeyCache getSystemKeyCache() {
- return null;
- }
-
- @Override
- public ManagedKeyDataCache getManagedKeyDataCache() {
- return null;
- }
-
- @Override
- public KeymetaAdmin getKeymetaAdmin() {
- return null;
- }
-
@Override
public FileSystem getFileSystem() {
return null;
@@ -874,6 +857,11 @@ public Connection createConnection(Configuration conf) throws IOException {
public AsyncClusterConnection getAsyncClusterConnection() {
return null;
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
static class CustomHeapMemoryTuner implements HeapMemoryTuner {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java
index 6b372fa99350..1a4ba7ac99cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEditsReplayAndAbort.java
@@ -125,7 +125,8 @@ public void test() throws Exception {
Path rootDir = TEST_UTIL.getDataTestDir();
Path tableDir = CommonFSUtils.getTableDir(rootDir, info.getTable());
HRegionFileSystem.createRegionOnFileSystem(CONF, TEST_UTIL.getTestFileSystem(), tableDir, info);
- region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs);
+ region = HRegion.newHRegion(tableDir, wal, TEST_UTIL.getTestFileSystem(), CONF, info, htd, rs,
+ rs.getKeyManagementService());
// create some recovered.edits
final WALFactory wals = new WALFactory(CONF, method);
try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
index ffc4e17f6f8b..29040ad58bec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
@@ -122,7 +122,8 @@ public void testOpenErrorMessageReference() throws IOException {
storeFileTrackerForTest.createReference(r, p);
StoreFileInfo sfi = storeFileTrackerForTest.getStoreFileInfo(p, true);
try {
- ReaderContext context = sfi.createReaderContext(false, 1000, ReaderType.PREAD);
+ ReaderContext context =
+ sfi.createReaderContext(false, 1000, ReaderType.PREAD, null, null, null);
sfi.createReader(context, null);
throw new IllegalStateException();
} catch (FileNotFoundException fnfe) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java
new file mode 100644
index 000000000000..ca2f8088a786
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java
@@ -0,0 +1,751 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
+import java.util.Arrays;
+import java.util.Collection;
+import javax.crypto.spec.SecretKeySpec;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.io.crypto.Cipher;
+import org.apache.hadoop.hbase.io.crypto.CipherProvider;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.MockAesKeyProvider;
+import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.testclassification.SecurityTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Suite;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({ TestSecurityUtil.TestBasic.class,
+ TestSecurityUtil.TestCreateEncryptionContext_ForWrites.class,
+ TestSecurityUtil.TestCreateEncryptionContext_ForReads.class,
+ TestSecurityUtil.TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException.class, })
+@Category({ SecurityTests.class, SmallTests.class })
+public class TestSecurityUtil {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSecurityUtil.class);
+
+ // Test constants to eliminate magic strings and improve maintainability
+ protected static final String TEST_NAMESPACE = "test-namespace";
+ protected static final String TEST_FAMILY = "test-family";
+ protected static final String HBASE_KEY = "hbase";
+ protected static final String TEST_KEK_METADATA = "test-kek-metadata";
+ protected static final long TEST_KEK_CHECKSUM = 12345L;
+ protected static final String TEST_KEY_16_BYTE = "test-key-16-byte";
+ protected static final String TEST_DEK_16_BYTE = "test-dek-16-byte";
+ protected static final String INVALID_KEY_DATA = "invalid-key-data";
+ protected static final String INVALID_WRAPPED_KEY_DATA = "invalid-wrapped-key-data";
+ protected static final String INVALID_SYSTEM_KEY_DATA = "invalid-system-key-data";
+ protected static final String UNKNOWN_CIPHER = "UNKNOWN_CIPHER";
+ protected static final String AES_CIPHER = "AES";
+ protected static final String DES_CIPHER = "DES";
+
+ protected Configuration conf;
+ protected HBaseTestingUtil testUtil;
+ protected Path testPath;
+ protected ColumnFamilyDescriptor mockFamily;
+ protected ManagedKeyDataCache mockManagedKeyDataCache;
+ protected SystemKeyCache mockSystemKeyCache;
+ protected FixedFileTrailer mockTrailer;
+ protected ManagedKeyData mockManagedKeyData;
+ protected Key testKey;
+ protected byte[] testWrappedKey;
+ protected Key kekKey;
+
+ /**
+ * Configuration builder for setting up different encryption test scenarios.
+ */
+ protected static class TestConfigBuilder {
+ private boolean encryptionEnabled = true;
+ private boolean keyManagementEnabled = false;
+ private boolean localKeyGenEnabled = false;
+ private String cipherProvider = "org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider";
+ private String keyProvider = MockAesKeyProvider.class.getName();
+ private String masterKeyName = HBASE_KEY;
+
+ public TestConfigBuilder withEncryptionEnabled(boolean enabled) {
+ this.encryptionEnabled = enabled;
+ return this;
+ }
+
+ public TestConfigBuilder withKeyManagement(boolean enabled, boolean localKeyGen) {
+ this.keyManagementEnabled = enabled;
+ this.localKeyGenEnabled = localKeyGen;
+ return this;
+ }
+
+ public TestConfigBuilder withNullCipherProvider() {
+ this.cipherProvider = NullCipherProvider.class.getName();
+ return this;
+ }
+
+ public void apply(Configuration conf) {
+ conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, encryptionEnabled);
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, keyProvider);
+ conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, masterKeyName);
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "true");
+ conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, cipherProvider);
+
+ if (keyManagementEnabled) {
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY,
+ localKeyGenEnabled);
+ } else {
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+ }
+ }
+ }
+
+ protected static TestConfigBuilder configBuilder() {
+ return new TestConfigBuilder();
+ }
+
+ protected void setUpEncryptionConfig() {
+ // Set up real encryption configuration using default AES cipher
+ conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, true);
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockAesKeyProvider.class.getName());
+ conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
+ // Enable key caching
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "true");
+ // Use DefaultCipherProvider for real AES encryption functionality
+ conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY,
+ "org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider");
+ }
+
+ protected void setUpEncryptionConfigWithNullCipher() {
+ configBuilder().withNullCipherProvider().apply(conf);
+ }
+
+ // ==== Mock Setup Helpers ====
+
+ protected void setupManagedKeyDataCache(String namespace, ManagedKeyData keyData) {
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(namespace))).thenReturn(keyData);
+ }
+
+ protected void setupManagedKeyDataCache(String namespace, String globalSpace,
+ ManagedKeyData keyData) {
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(namespace))).thenReturn(null);
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(globalSpace))).thenReturn(keyData);
+ }
+
+ protected void setupTrailerMocks(byte[] keyBytes, String metadata, Long checksum,
+ String namespace) {
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(metadata);
+ if (checksum != null) {
+ when(mockTrailer.getKEKChecksum()).thenReturn(checksum);
+ }
+ when(mockTrailer.getKeyNamespace()).thenReturn(namespace);
+ }
+
+ protected void setupSystemKeyCache(Long checksum, ManagedKeyData keyData) {
+ when(mockSystemKeyCache.getSystemKeyByChecksum(checksum)).thenReturn(keyData);
+ }
+
+ protected void setupSystemKeyCache(ManagedKeyData latestKey) {
+ when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(latestKey);
+ }
+
+ protected void setupManagedKeyDataCacheEntry(String namespace, String metadata, byte[] keyBytes,
+ ManagedKeyData keyData) throws IOException, KeyException {
+ when(mockManagedKeyDataCache.getEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(namespace), eq(metadata), eq(keyBytes))).thenReturn(keyData);
+ }
+
+ // ==== Exception Testing Helpers ====
+
+ protected <T extends Exception> void assertExceptionContains(Class<T> expectedType,
+ String expectedMessage, Runnable testCode) {
+ T exception = assertThrows(expectedType, () -> testCode.run());
+ assertTrue("Exception message should contain: " + expectedMessage,
+ exception.getMessage().contains(expectedMessage));
+ }
+
+ protected void assertEncryptionContextThrowsForWrites(Class<? extends Exception> expectedType,
+ String expectedMessage) {
+ Exception exception = assertThrows(Exception.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, mockFamily, mockManagedKeyDataCache,
+ mockSystemKeyCache, TEST_NAMESPACE);
+ });
+ assertTrue("Expected exception type: " + expectedType.getName() + ", but got: "
+ + exception.getClass().getName(), expectedType.isInstance(exception));
+ assertTrue("Exception message should contain: " + expectedMessage,
+ exception.getMessage().contains(expectedMessage));
+ }
+
+ protected void assertEncryptionContextThrowsForReads(Class<? extends Exception> expectedType,
+ String expectedMessage) {
+ Exception exception = assertThrows(Exception.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+ assertTrue("Expected exception type: " + expectedType.getName() + ", but got: "
+ + exception.getClass().getName(), expectedType.isInstance(exception));
+ assertTrue("Exception message should contain: " + expectedMessage,
+ exception.getMessage().contains(expectedMessage));
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ conf = HBaseConfiguration.create();
+ testUtil = new HBaseTestingUtil(conf);
+ testPath = testUtil.getDataTestDir("test-file");
+
+ // Setup mocks (only for objects that don't have encryption logic)
+ mockFamily = mock(ColumnFamilyDescriptor.class);
+ mockManagedKeyDataCache = mock(ManagedKeyDataCache.class);
+ mockSystemKeyCache = mock(SystemKeyCache.class);
+ mockTrailer = mock(FixedFileTrailer.class);
+ mockManagedKeyData = mock(ManagedKeyData.class);
+
+ // Use a real test key with exactly 16 bytes for AES-128
+ testKey = new SecretKeySpec(TEST_KEY_16_BYTE.getBytes(), AES_CIPHER);
+
+ // Configure mocks
+ when(mockFamily.getEncryptionType()).thenReturn(AES_CIPHER);
+ when(mockFamily.getNameAsString()).thenReturn(TEST_FAMILY);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ // Set up default encryption config
+ setUpEncryptionConfig();
+
+ // Create test wrapped key
+ KeyProvider keyProvider = Encryption.getKeyProvider(conf);
+ kekKey = keyProvider.getKey(HBASE_KEY);
+ Key key = keyProvider.getKey(TEST_DEK_16_BYTE);
+ testWrappedKey = EncryptionUtil.wrapKey(conf, null, key, kekKey);
+ }
+
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ SecurityTests.class, SmallTests.class })
+ public static class TestBasic extends TestSecurityUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestBasic.class);
+
+ @Test
+ public void testGetUserFromPrincipal() {
+ // Test with slash separator
+ assertEquals("user1", SecurityUtil.getUserFromPrincipal("user1/host@REALM"));
+ assertEquals("user2", SecurityUtil.getUserFromPrincipal("user2@REALM"));
+
+ // Test with no realm
+ assertEquals("user3", SecurityUtil.getUserFromPrincipal("user3"));
+
+ // Test with multiple slashes
+ assertEquals("user4", SecurityUtil.getUserFromPrincipal("user4/host1/host2@REALM"));
+ }
+
+ @Test
+ public void testGetPrincipalWithoutRealm() {
+ // Test with realm
+ assertEquals("user1/host", SecurityUtil.getPrincipalWithoutRealm("user1/host@REALM"));
+ assertEquals("user2", SecurityUtil.getPrincipalWithoutRealm("user2@REALM"));
+
+ // Test without realm
+ assertEquals("user3", SecurityUtil.getPrincipalWithoutRealm("user3"));
+ assertEquals("user4/host", SecurityUtil.getPrincipalWithoutRealm("user4/host"));
+ }
+
+ @Test
+ public void testIsKeyManagementEnabled() {
+ Configuration conf = HBaseConfiguration.create();
+
+ // Test default behavior (should be false)
+ assertFalse(SecurityUtil.isKeyManagementEnabled(conf));
+
+ // Test with key management enabled
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ assertTrue(SecurityUtil.isKeyManagementEnabled(conf));
+
+ // Test with key management disabled
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+ assertFalse(SecurityUtil.isKeyManagementEnabled(conf));
+ }
+ }
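+
+ // Not SecurityUtil's actual code: the principal-parsing behavior pinned down by
+ // the assertions above amounts to stripping everything after '@' for the
+ // realm-less form, then taking the first '/'-separated component as the short
+ // user name. A hedged stand-in satisfying exactly those assertions:
+ //
+ // static String principalWithoutRealm(String principal) {
+ //   int at = principal.indexOf('@');
+ //   return at < 0 ? principal : principal.substring(0, at);
+ // }
+ //
+ // static String userFromPrincipal(String principal) {
+ //   String noRealm = principalWithoutRealm(principal);
+ //   int slash = noRealm.indexOf('/');
+ //   return slash < 0 ? noRealm : noRealm.substring(0, slash);
+ // }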
+
+ // Tests for the first createEncryptionContext method (for ColumnFamilyDescriptor)
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ SecurityTests.class, SmallTests.class })
+ public static class TestCreateEncryptionContext_ForWrites extends TestSecurityUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestCreateEncryptionContext_ForWrites.class);
+
+ @Test
+ public void testWithNoEncryptionOnFamily() throws IOException {
+ when(mockFamily.getEncryptionType()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache, "test-namespace");
+
+ assertEquals(Encryption.Context.NONE, result);
+ }
+
+ @Test
+ public void testWithEncryptionDisabled() throws IOException {
+ configBuilder().withEncryptionEnabled(false).apply(conf);
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "encryption feature is disabled");
+ }
+
+ @Test
+ public void testWithKeyManagement_LocalKeyGen() throws IOException {
+ configBuilder().withKeyManagement(true, true).apply(conf);
+ setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+
+ verifyContext(result);
+ }
+
+ @Test
+ public void testWithKeyManagement_NoActiveKey() throws IOException {
+ configBuilder().withKeyManagement(true, false).apply(conf);
+ setupManagedKeyDataCache(TEST_NAMESPACE, ManagedKeyData.KEY_SPACE_GLOBAL, null);
+ assertEncryptionContextThrowsForWrites(IOException.class, "No active key found");
+ }
+
+ @Test
+ public void testWithKeyManagement_LocalKeyGen_WithUnknownKeyCipher() throws IOException {
+ when(mockFamily.getEncryptionType()).thenReturn(UNKNOWN_CIPHER);
+ Key unknownKey = mock(Key.class);
+ when(unknownKey.getAlgorithm()).thenReturn(UNKNOWN_CIPHER);
+ when(mockManagedKeyData.getTheKey()).thenReturn(unknownKey);
+
+ configBuilder().withKeyManagement(true, true).apply(conf);
+ setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData);
+ assertEncryptionContextThrowsForWrites(RuntimeException.class,
+ "Cipher 'UNKNOWN_CIPHER' is not");
+ }
+
+ @Test
+ public void testWithKeyManagement_LocalKeyGen_WithKeyAlgorithmMismatch() throws IOException {
+ Key desKey = mock(Key.class);
+ when(desKey.getAlgorithm()).thenReturn(DES_CIPHER);
+ when(mockManagedKeyData.getTheKey()).thenReturn(desKey);
+
+ configBuilder().withKeyManagement(true, true).apply(conf);
+ setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData);
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "Encryption for family 'test-family' configured with type 'AES' but key specifies algorithm 'DES'");
+ }
+
+ @Test
+ public void testWithKeyManagement_UseSystemKeyWithNSSpecificActiveKey() throws IOException {
+ configBuilder().withKeyManagement(true, false).apply(conf);
+ setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData);
+ setupSystemKeyCache(mockManagedKeyData);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+
+ verifyContext(result);
+ }
+
+ @Test
+ public void testWithKeyManagement_UseSystemKeyWithoutNSSpecificActiveKey() throws IOException {
+ configBuilder().withKeyManagement(true, false).apply(conf);
+ setupManagedKeyDataCache(TEST_NAMESPACE, ManagedKeyData.KEY_SPACE_GLOBAL, mockManagedKeyData);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+
+ verifyContext(result);
+ }
+
+ @Test
+ public void testWithoutKeyManagement_WithFamilyProvidedKey() throws Exception {
+ when(mockFamily.getEncryptionKey()).thenReturn(testWrappedKey);
+ configBuilder().withKeyManagement(false, false).apply(conf);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+
+ verifyContext(result, false);
+ }
+
+ @Test
+ public void testWithoutKeyManagement_KeyAlgorithmMismatch() throws Exception {
+ // Create a key with different algorithm and wrap it
+ Key differentKey = new SecretKeySpec(TEST_KEY_16_BYTE.getBytes(), DES_CIPHER);
+ byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, HBASE_KEY, differentKey);
+ when(mockFamily.getEncryptionKey()).thenReturn(wrappedDESKey);
+
+ configBuilder().withKeyManagement(false, false).apply(conf);
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "Encryption for family 'test-family' configured with type 'AES' but key specifies algorithm 'DES'");
+ }
+
+ @Test
+ public void testWithoutKeyManagement_WithRandomKeyGeneration() throws IOException {
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ configBuilder().withKeyManagement(false, false).apply(conf);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+
+ verifyContext(result, false);
+ }
+
+ @Test
+ public void testWithUnavailableCipher() throws IOException {
+ when(mockFamily.getEncryptionType()).thenReturn(UNKNOWN_CIPHER);
+ setUpEncryptionConfigWithNullCipher();
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "Cipher 'UNKNOWN_CIPHER' is not available");
+ }
+
+ // Read-path overload: a trailer without key material should yield Context.NONE.
+
+ @Test
+ public void testWithNoKeyMaterial() throws IOException {
+ when(mockTrailer.getEncryptionKey()).thenReturn(null);
+ when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ assertEquals(Encryption.Context.NONE, result);
+ }
+ }
+
+ // Tests for the second createEncryptionContext method (for reading files)
+ @RunWith(BlockJUnit4ClassRunner.class)
+ @Category({ SecurityTests.class, SmallTests.class })
+ public static class TestCreateEncryptionContext_ForReads extends TestSecurityUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestCreateEncryptionContext_ForReads.class);
+
+ @Test
+ public void testWithKEKMetadata() throws Exception {
+ setupTrailerMocks(testWrappedKey, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, TEST_NAMESPACE);
+ setupManagedKeyDataCacheEntry(TEST_NAMESPACE, TEST_KEK_METADATA, testWrappedKey,
+ mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ }
+
+ @Test
+ public void testWithKeyManagement_KEKMetadataFailure() throws IOException, KeyException {
+ byte[] keyBytes = "test-encrypted-key".getBytes();
+ String kekMetadata = "test-kek-metadata";
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(kekMetadata);
+ when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+
+ when(mockManagedKeyDataCache.getEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq("test-namespace"), eq(kekMetadata), eq(keyBytes)))
+ .thenThrow(new IOException("Key not found"));
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("Failed to get key data"));
+ }
+
+ @Test
+ public void testWithKeyManagement_UseSystemKey() throws IOException {
+ setupTrailerMocks(testWrappedKey, null, TEST_KEK_CHECKSUM, TEST_NAMESPACE);
+ configBuilder().withKeyManagement(true, false).apply(conf);
+ setupSystemKeyCache(TEST_KEK_CHECKSUM, mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ }
+
+ @Test
+ public void testWithKeyManagement_SystemKeyNotFound() throws IOException {
+ byte[] keyBytes = "test-encrypted-key".getBytes();
+ long kekChecksum = 12345L;
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+ when(mockTrailer.getKEKChecksum()).thenReturn(kekChecksum);
+ when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+
+ // Enable key management
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+
+ when(mockSystemKeyCache.getSystemKeyByChecksum(kekChecksum)).thenReturn(null);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("Failed to get system key"));
+ }
+
+ @Test
+ public void testWithoutKeyManagementEnabled() throws IOException {
+ when(mockTrailer.getEncryptionKey()).thenReturn(testWrappedKey);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+ when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE);
+ configBuilder().withKeyManagement(false, false).apply(conf);
+ // TODO: Get the key provider to return kek when getKeys() is called.
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result, false);
+ }
+
+ @Test
+ public void testWithoutKeyManagement_UnwrapFailure() throws IOException {
+ byte[] invalidKeyBytes = INVALID_KEY_DATA.getBytes();
+ when(mockTrailer.getEncryptionKey()).thenReturn(invalidKeyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+ when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE);
+ configBuilder().withKeyManagement(false, false).apply(conf);
+
+ Exception exception = assertThrows(Exception.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ // The exception should indicate that unwrapping failed - could be IOException or
+ // RuntimeException
+ assertNotNull(exception);
+ }
+
+ @Test
+ public void testCreateEncryptionContext_WithoutKeyManagement_UnavailableCipher()
+ throws Exception {
+ // Create a DES key and wrap it first with working configuration
+ Key desKey = new SecretKeySpec("test-key-16-byte".getBytes(), "DES");
+ byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, "hbase", desKey);
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(wrappedDESKey);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+ when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+
+ // Disable key management and use null cipher provider
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+ setUpEncryptionConfigWithNullCipher();
+
+ RuntimeException exception = assertThrows(RuntimeException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("Cipher 'AES' not available"));
+ }
+
+ @Test
+ public void testCreateEncryptionContext_WithKeyManagement_NullKeyManagementCache()
+ throws IOException {
+ byte[] keyBytes = "test-encrypted-key".getBytes();
+ String kekMetadata = "test-kek-metadata";
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(kekMetadata);
+ when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+
+ // Enable key management
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, null, mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("ManagedKeyDataCache is null"));
+ }
+
+ @Test
+ public void testCreateEncryptionContext_WithKeyManagement_NullSystemKeyCache()
+ throws IOException {
+ byte[] keyBytes = "test-encrypted-key".getBytes();
+
+ when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
+ when(mockTrailer.getKEKMetadata()).thenReturn(null);
+ when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+
+ // Enable key management
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ null);
+ });
+
+ assertTrue(exception.getMessage().contains("SystemKeyCache is null"));
+ }
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ SecurityTests.class, SmallTests.class })
+ public static class TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException
+ extends TestSecurityUtil {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule
+ .forClass(TestCreateEncryptionContext_WithoutKeyManagement_UnwrapKeyException.class);
+
+ @Parameter(0)
+ public boolean isKeyException;
+
+ @Parameterized.Parameters(name = "{index},isKeyException={0}")
+ public static Collection<Object[]> data() {
+ return Arrays.asList(new Object[][] { { true }, { false }, });
+ }
+
+ @Test
+ public void test() throws IOException {
+ // No-op placeholder; the unwrap-failure scenarios are exercised by
+ // testWithDEK and testWithSystemKey below.
+ }
+
+ @Test
+ public void testWithDEK() throws IOException, KeyException {
+ // This test is challenging because we need to create a scenario where unwrapping fails
+ // with either KeyException or IOException. We'll create invalid wrapped data.
+ byte[] invalidKeyBytes = INVALID_WRAPPED_KEY_DATA.getBytes();
+
+ setupTrailerMocks(invalidKeyBytes, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, TEST_NAMESPACE);
+ setupManagedKeyDataCacheEntry(TEST_NAMESPACE, TEST_KEK_METADATA, invalidKeyBytes,
+ mockManagedKeyData);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains("Failed to unwrap key with KEK checksum: "
+ + TEST_KEK_CHECKSUM + ", metadata: " + TEST_KEK_METADATA));
+ // The root cause should be some kind of parsing/unwrapping exception
+ assertNotNull(exception.getCause());
+ }
+
+ @Test
+ public void testWithSystemKey() throws IOException {
+ // Use invalid key bytes to trigger unwrapping failure
+ byte[] invalidKeyBytes = INVALID_SYSTEM_KEY_DATA.getBytes();
+
+ setupTrailerMocks(invalidKeyBytes, null, TEST_KEK_CHECKSUM, TEST_NAMESPACE);
+ configBuilder().withKeyManagement(true, false).apply(conf);
+ setupSystemKeyCache(TEST_KEK_CHECKSUM, mockManagedKeyData);
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
+ mockSystemKeyCache);
+ });
+
+ assertTrue(exception.getMessage().contains(
+ "Failed to unwrap key with KEK checksum: " + TEST_KEK_CHECKSUM + ", metadata: null"));
+ // The root cause should be some kind of parsing/unwrapping exception
+ assertNotNull(exception.getCause());
+ }
+ }
+
+ protected void verifyContext(Encryption.Context context) {
+ verifyContext(context, true);
+ }
+
+ protected void verifyContext(Encryption.Context context, boolean withKeyManagement) {
+ assertNotNull(context);
+ assertNotNull("Context should have a cipher", context.getCipher());
+ assertNotNull("Context should have a key", context.getKey());
+ if (withKeyManagement) {
+ assertNotNull("Context should have KEK data when key management is enabled",
+ context.getKEKData());
+ } else {
+ assertNull("Context should not have KEK data when key management is disabled",
+ context.getKEKData());
+ }
+ }
+
+ /**
+ * Null cipher provider for testing error cases.
+ */
+ public static class NullCipherProvider implements CipherProvider {
+ private Configuration conf;
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+
+ @Override
+ public String getName() {
+ return "null";
+ }
+
+ @Override
+ public String[] getSupportedCiphers() {
+ return new String[0];
+ }
+
+ @Override
+ public Cipher getCipher(String name) {
+ return null; // Always return null to simulate unavailable cipher
+ }
+ }
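+
+ // Wiring sketch (assumes setUpEncryptionConfigWithNullCipher() points the
+ // standard cipher-provider key at this class):
+ // conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY,
+ // NullCipherProvider.class.getName());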
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 5d62d6a908c0..273385ec9c84 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -55,9 +55,7 @@
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.ipc.SimpleRpcServer;
-import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
-import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
-import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.security.SecurityInfo;
@@ -354,27 +352,17 @@ public ChoreService getChoreService() {
}
@Override
- public SystemKeyCache getSystemKeyCache() {
- return null;
- }
-
- @Override
- public ManagedKeyDataCache getManagedKeyDataCache() {
- return null;
- }
-
- @Override
- public KeymetaAdmin getKeymetaAdmin() {
+ public Connection createConnection(Configuration conf) throws IOException {
return null;
}
@Override
- public Connection createConnection(Configuration conf) throws IOException {
+ public AsyncClusterConnection getAsyncClusterConnection() {
return null;
}
@Override
- public AsyncClusterConnection getAsyncClusterConnection() {
+ public KeyManagementService getKeyManagementService() {
return null;
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index 7b2749177889..a0246fee2955 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -178,7 +178,7 @@ public void testSkipReplayAndUpdateSeqId() throws Exception {
for (RegionInfo restoredRegion : restoredRegions) {
// open restored region
HRegion region = HRegion.newHRegion(CommonFSUtils.getTableDir(restoreDir, tableName), null,
- fs, conf, restoredRegion, htd, null);
+ fs, conf, restoredRegion, htd, null, null);
// set restore flag
region.setRestoredRegion(true);
region.initialize();
@@ -188,7 +188,7 @@ public void testSkipReplayAndUpdateSeqId() throws Exception {
// open restored region without set restored flag
HRegion region2 = HRegion.newHRegion(CommonFSUtils.getTableDir(restoreDir, tableName), null,
- fs, conf, restoredRegion, htd, null);
+ fs, conf, restoredRegion, htd, null, null);
region2.initialize();
long maxSeqId2 = WALSplitUtil.getMaxRegionSequenceId(fs, recoveredEdit);
Assert.assertTrue(maxSeqId2 > maxSeqId);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 98283db19146..77b6ceffe7ca 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -26,9 +26,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
-import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
-import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
+import org.apache.hadoop.hbase.keymeta.KeyManagementService;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.slf4j.Logger;
@@ -103,21 +101,6 @@ public ChoreService getChoreService() {
throw new UnsupportedOperationException();
}
- @Override
- public SystemKeyCache getSystemKeyCache() {
- return null;
- }
-
- @Override
- public ManagedKeyDataCache getManagedKeyDataCache() {
- return null;
- }
-
- @Override
- public KeymetaAdmin getKeymetaAdmin() {
- return null;
- }
-
@Override
public FileSystem getFileSystem() {
throw new UnsupportedOperationException();
@@ -137,4 +120,9 @@ public Connection createConnection(Configuration conf) throws IOException {
public AsyncClusterConnection getAsyncClusterConnection() {
throw new UnsupportedOperationException();
}
+
+ @Override
+ public KeyManagementService getKeyManagementService() {
+ return null;
+ }
}
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index fb699554fc06..d595b2dd219d 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -41,6 +41,12 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-common</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-annotations</artifactId>
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index 9b24e5caa973..a7e531806cfe 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
#
#
# Licensed to the Apache Software Foundation (ASF) under one
@@ -29,6 +31,7 @@
require 'hbase/visibility_labels'
module Hbase
+ # Main HBase class for connection and admin operations
class Hbase
attr_accessor :configuration
@@ -45,22 +48,21 @@ def initialize(config = nil)
end
def connection
- if @connection.nil?
- @connection = ConnectionFactory.createConnection(configuration)
- end
+ @connection = ConnectionFactory.createConnection(configuration) if @connection.nil?
@connection
end
+
# Returns ruby's Admin class from admin.rb
def admin
- ::Hbase::Admin.new(self.connection)
+ ::Hbase::Admin.new(connection)
end
def rsgroup_admin
- ::Hbase::RSGroupAdmin.new(self.connection)
+ ::Hbase::RSGroupAdmin.new(connection)
end
def keymeta_admin
- ::Hbase::KeymetaAdmin.new(@connection)
+ ::Hbase::KeymetaAdmin.new(connection)
end
def taskmonitor
@@ -69,7 +71,7 @@ def taskmonitor
# Create new one each time
def table(table, shell)
- ::Hbase::Table.new(self.connection.getTable(TableName.valueOf(table)), shell)
+ ::Hbase::Table.new(connection.getTable(TableName.valueOf(table)), shell)
end
def replication_admin
@@ -77,21 +79,19 @@ def replication_admin
end
def security_admin
- ::Hbase::SecurityAdmin.new(self.connection.getAdmin)
+ ::Hbase::SecurityAdmin.new(connection.getAdmin)
end
def visibility_labels_admin
- ::Hbase::VisibilityLabelsAdmin.new(self.connection.getAdmin)
+ ::Hbase::VisibilityLabelsAdmin.new(connection.getAdmin)
end
def quotas_admin
- ::Hbase::QuotasAdmin.new(self.connection.getAdmin)
+ ::Hbase::QuotasAdmin.new(connection.getAdmin)
end
def shutdown
- if @connection != nil
- @connection.close
- end
+ @connection&.close
end
end
end
diff --git a/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb
index e2af5f524cc3..98a57766831a 100644
--- a/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb
@@ -34,7 +34,7 @@ def format_status_row(status)
[
status.getKeyCustodianEncoded,
status.getKeyNamespace,
- status.getKeyStatus.toString,
+ status.getKeyState.toString,
status.getKeyMetadata,
status.getKeyMetadataHashEncoded,
status.getRefreshTimestamp
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java
new file mode 100644
index 000000000000..b67fbc69f3c7
--- /dev/null
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.KeymetaTestUtils;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.jruby.embed.ScriptingContainer;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ClientTests.class, IntegrationTests.class })
+public class TestKeymetaAdminShell extends ManagedKeyTestBase implements RubyShellTest {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaAdminShell.class);
+
+ private final ScriptingContainer jruby = new ScriptingContainer();
+
+ @Before
+ public void setUp() throws Exception {
+ final Configuration conf = TEST_UTIL.getConfiguration();
+ conf.set("zookeeper.session.timeout", "6000000");
+ conf.set("hbase.rpc.timeout", "6000000");
+ conf.set("hbase.rpc.read.timeout", "6000000");
+ conf.set("hbase.rpc.write.timeout", "6000000");
+ conf.set("hbase.client.operation.timeout", "6000000");
+ conf.set("hbase.client.scanner.timeout.period", "6000000");
+ conf.set("hbase.ipc.client.socket.timeout.connect", "6000000");
+ conf.set("hbase.ipc.client.socket.timeout.read", "6000000");
+ conf.set("hbase.ipc.client.socket.timeout.write", "6000000");
+ conf.set("hbase.master.start.timeout.localHBaseCluster", "6000000");
+ conf.set("hbase.master.init.timeout.localHBaseCluster", "6000000");
+ conf.set("hbase.client.sync.wait.timeout.msec", "6000000");
+ Map<Bytes, Bytes> cust2key = new HashMap<>();
+ Map<Bytes, String> cust2alias = new HashMap<>();
+ String clusterId = UUID.randomUUID().toString();
+ String SYSTEM_KEY_ALIAS = "system-key-alias";
+ String CUST1 = "cust1";
+ String CUST1_ALIAS = "cust1-alias";
+ String GLOB_CUST_ALIAS = "glob-cust-alias";
+ String providerParams = KeymetaTestUtils.setupTestKeyStore(TEST_UTIL, true, true, store -> {
+ Properties p = new Properties();
+ try {
+ KeymetaTestUtils.addEntry(conf, 128, store, CUST1_ALIAS, CUST1, true, cust2key, cust2alias,
+ p);
+ KeymetaTestUtils.addEntry(conf, 128, store, GLOB_CUST_ALIAS, "*", true, cust2key,
+ cust2alias, p);
+ KeymetaTestUtils.addEntry(conf, 128, store, SYSTEM_KEY_ALIAS, clusterId, true, cust2key,
+ cust2alias, p);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return p;
+ });
+ // byte[] systemKey = cust2key.get(new Bytes(clusterId.getBytes())).get();
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, providerParams);
+ RubyShellTest.setUpConfig(this);
+ super.setUp();
+ RubyShellTest.setUpJRubyRuntime(this);
+ RubyShellTest.doTestSetup(this);
+ addCustodianRubyEnvVars(jruby, "CUST1", CUST1);
+ }
+
+ @Override
+ public HBaseTestingUtil getTEST_UTIL() {
+ return TEST_UTIL;
+ }
+
+ @Override
+ public ScriptingContainer getJRuby() {
+ return jruby;
+ }
+
+ @Override
+ public String getSuitePattern() {
+ return "**/*_keymeta_test.rb";
+ }
+
+ @Test
+ public void testRunShellTests() throws Exception {
+ RubyShellTest.testRunShellTests(this);
+ }
+
+ @Override
+ protected Class<? extends ManagedKeyProvider> getKeyProviderClass() {
+ return ManagedKeyStoreKeyProvider.class;
+ }
+
+ public static void addCustodianRubyEnvVars(ScriptingContainer jruby, String custId,
+ String custodian) {
+ jruby.put("$" + custId, custodian);
+ jruby.put("$" + custId + "_ALIAS", custodian + "-alias");
+ jruby.put("$" + custId + "_ENCODED", Base64.getEncoder().encodeToString(custodian.getBytes()));
+ }
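+
+ // In the JRuby suites these become globals; e.g. after
+ // addCustodianRubyEnvVars(jruby, "CUST1", "cust1") the Ruby side sees
+ // $CUST1 == "cust1", $CUST1_ALIAS == "cust1-alias" and
+ // $CUST1_ENCODED == Base64("cust1").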
+}
diff --git a/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
new file mode 100644
index 000000000000..c1108d0fc7d1
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
@@ -0,0 +1,61 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+module Hbase
+ # Test class for keymeta admin functionality
+ class KeymetaAdminTest < Test::Unit::TestCase
+ include TestHelpers
+
+ def setup
+ setup_hbase
+ end
+
+ define_test 'Test enable key management' do
+ cust_and_namespace = "#{$CUST1_ENCODED}:*"
+ # Repeat the enable twice in a loop and ensure multiple enables succeed and return the
+ # same output.
+ 2.times do |i|
+ output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+ puts "enable_key_management #{i} output: #{output}"
+ assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
+ end
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status output: #{output}"
+ assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
+
+ # The ManagedKeyStoreKeyProvider doesn't support specific namespaces, so it will return the
+ # global key.
+ cust_and_namespace = "#{$CUST1_ENCODED}:test_table/f"
+ output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+ puts "enable_key_management output: #{output}"
+ assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status output: #{output}"
+ assert(output.include?('0 row(s)'))
+ end
+ end
+end
diff --git a/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
new file mode 100644
index 000000000000..be52a2524e4d
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
@@ -0,0 +1,143 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+
+java_import org.apache.hadoop.conf.Configuration
+java_import org.apache.hadoop.fs.FSDataInputStream
+java_import org.apache.hadoop.hbase.CellUtil
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.client.Get
+java_import org.apache.hadoop.hbase.io.crypto.Encryption
+java_import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider
+java_import org.apache.hadoop.hbase.io.hfile.CorruptHFileException
+java_import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
+java_import org.apache.hadoop.hbase.io.hfile.HFile
+java_import org.apache.hadoop.hbase.io.hfile.CacheConfig
+java_import org.apache.hadoop.hbase.util.Bytes
+
+module Hbase
+ # Test class for encrypted table keymeta functionality
+ class EncryptedTableKeymetaTest < Test::Unit::TestCase
+ include TestHelpers
+
+ def setup
+ setup_hbase
+ @test_table = 'enctest'
+ @connection = $TEST_CLUSTER.connection
+ end
+
+ define_test 'Test table put/get with encryption' do
+ cust_and_namespace = "#{$CUST1_ENCODED}:*"
+ @shell.command(:enable_key_management, cust_and_namespace)
+ @shell.command(:create, @test_table, { 'NAME' => 'f', 'ENCRYPTION' => 'AES' })
+ test_table = table(@test_table)
+ test_table.put('1', 'f:a', '2')
+ puts "Added a row, now flushing table #{@test_table}"
+ command(:flush, @test_table)
+
+ table_name = TableName.valueOf(@test_table)
+ store_file_info = nil
+ $TEST_CLUSTER.getRSForFirstRegionInTable(table_name).getRegions(table_name).each do |region|
+ region.getStores.each do |store|
+ store.getStorefiles.each do |storefile|
+ store_file_info = storefile.getFileInfo
+ end
+ end
+ end
+ assert_not_nil(store_file_info)
+ hfile_info = store_file_info.getHFileInfo
+ assert_not_nil(hfile_info)
+ live_trailer = hfile_info.getTrailer
+ assert_trailer(live_trailer)
+
+ ## Disable table to ensure that the stores are not cached.
+ command(:disable, @test_table)
+ assert(!command(:is_enabled, @test_table))
+
+ # Open FSDataInputStream to the path pointed to by the store_file_info
+ fs = store_file_info.getFileSystem
+ fio = fs.open(store_file_info.getPath)
+ assert_not_nil(fio)
+ # Read the trailer using FixedFileTrailer
+ offline_trailer = FixedFileTrailer.readFromStream(
+ fio, fs.getFileStatus(store_file_info.getPath).getLen
+ )
+ fio.close
+ assert_trailer(offline_trailer, live_trailer)
+
+ # Test the ability to read the encrypted HFile in an offline mode
+ reader = HFile.createReader(fs, store_file_info.getPath, CacheConfig::DISABLED, true,
+ $TEST_CLUSTER.getConfiguration)
+ assert_not_nil(reader)
+ offline_trailer = reader.getTrailer
+ assert_trailer(offline_trailer, live_trailer)
+ scanner = reader.getScanner($TEST_CLUSTER.getConfiguration, false, false)
+ assert_true(scanner.seekTo)
+ cell = scanner.getCell
+ assert_equal('1', Bytes.toString(CellUtil.cloneRow(cell)))
+ assert_equal('2', Bytes.toString(CellUtil.cloneValue(cell)))
+ assert_false(scanner.next)
+
+ # Confirm that the offline reading will fail with no config related to encryption
+ Encryption.clearKeyProviderCache
+ conf = Configuration.new($TEST_CLUSTER.getConfiguration)
+ conf.set(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.java_class.getName)
+ # This is expected to fail with CorruptHFileException.
+ e = assert_raises(CorruptHFileException) do
+ HFile.createReader(fs, store_file_info.getPath, CacheConfig::DISABLED, true, conf)
+ end
+ # assert_raises returns the raised exception; asserting on the message inside
+ # the block would never execute once createReader raises.
+ assert_true(e.message.include?(
+ "Problem reading HFile Trailer from file #{store_file_info.getPath}"
+ ))
+ Encryption.clearKeyProviderCache
+
+ ## Enable back the table to be able to query.
+ command(:enable, @test_table)
+ assert(command(:is_enabled, @test_table))
+
+ get = Get.new(Bytes.toBytes('1'))
+ res = test_table.table.get(get)
+ puts "res for row '1' and column f:a: #{res}"
+ assert_false(res.isEmpty)
+ assert_equal('2', Bytes.toString(res.getValue(Bytes.toBytes('f'), Bytes.toBytes('a'))))
+ end
+
+ def assert_trailer(offline_trailer, live_trailer = nil)
+ assert_not_nil(offline_trailer)
+ assert_not_nil(offline_trailer.getEncryptionKey)
+ assert_not_nil(offline_trailer.getKEKMetadata)
+ assert_not_nil(offline_trailer.getKEKChecksum)
+ assert_not_nil(offline_trailer.getKeyNamespace)
+
+ return unless live_trailer
+
+ assert_equal(live_trailer.getEncryptionKey, offline_trailer.getEncryptionKey)
+ assert_equal(live_trailer.getKEKMetadata, offline_trailer.getKEKMetadata)
+ assert_equal(live_trailer.getKEKChecksum, offline_trailer.getKEKChecksum)
+ assert_equal(live_trailer.getKeyNamespace, offline_trailer.getKeyNamespace)
+ end
+ end
+end
From bad5c8fe4fc3fe69a4821844089a2d77fb36fd7f Mon Sep 17 00:00:00 2001
From: Hari Krishna Dara
Date: Tue, 14 Oct 2025 12:05:08 +0530
Subject: [PATCH 5/9] HBASE-29617: Changes to support smooth migration to key
management (#7362)
---
.../hbase/client/ColumnFamilyDescriptor.java | 3 +
.../client/ColumnFamilyDescriptorBuilder.java | 24 +
.../org/apache/hadoop/hbase/HConstants.java | 8 +
.../hadoop/hbase/io/crypto/Encryption.java | 58 +-
.../hbase/io/crypto/ManagedKeyProvider.java | 10 +-
.../io/crypto/ManagedKeyStoreKeyProvider.java | 61 +-
.../hbase/io/crypto/MockAesKeyProvider.java | 10 +-
.../hbase/io/crypto/KeymetaTestUtils.java | 22 +-
.../io/crypto/MockManagedKeyProvider.java | 4 +-
.../hbase/io/crypto/TestKeyProvider.java | 14 +
.../io/crypto/TestManagedKeyProvider.java | 299 +++++++-
.../hbase/keymeta/KeyManagementBase.java | 13 +-
.../hadoop/hbase/regionserver/HStore.java | 6 +-
.../hadoop/hbase/security/SecurityUtil.java | 211 ++++--
.../hadoop/hbase/util/EncryptionTest.java | 19 +-
.../ManagedKeyProviderInterceptor.java | 4 +-
.../hbase/keymeta/ManagedKeyTestBase.java | 88 ++-
.../hbase/keymeta/TestKeyManagementBase.java | 13 +-
.../keymeta/TestKeyManagementService.java | 8 +-
.../keymeta/TestKeymetaTableAccessor.java | 5 +-
.../keymeta/TestManagedKeyDataCache.java | 2 +-
.../hbase/keymeta/TestManagedKeymeta.java | 2 +-
.../hbase/master/TestKeymetaAdminImpl.java | 16 +-
.../hbase/master/TestSystemKeyManager.java | 5 +-
.../hbase/security/TestSecurityUtil.java | 506 +++++++++++---
.../hadoop/hbase/util/TestEncryptionTest.java | 76 +++
hbase-shell/src/main/ruby/hbase/admin.rb | 4 +
.../hbase/client/TestKeymetaAdminShell.java | 82 ++-
.../hbase/client/TestKeymetaMigration.java | 52 ++
.../src/test/ruby/shell/admin_keymeta_test.rb | 37 +-
.../shell/encrypted_table_keymeta_test.rb | 49 +-
.../key_provider_keymeta_migration_test.rb | 641 ++++++++++++++++++
hbase-shell/src/test/ruby/tests_runner.rb | 3 +
33 files changed, 2054 insertions(+), 301 deletions(-)
create mode 100644 hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMigration.java
create mode 100644 hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
index 369b2be8ecda..ea8d81043694 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
@@ -114,6 +114,9 @@ public interface ColumnFamilyDescriptor {
/** Returns Return the raw crypto key attribute for the family, or null if not set */
byte[] getEncryptionKey();
+ /** Returns the encryption key namespace for this family */
+ String getEncryptionKeyNamespace();
+
/** Returns Return the encryption algorithm in use by this family */
String getEncryptionType();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 42f25fdc56f4..12bb73565078 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -167,6 +167,10 @@ public class ColumnFamilyDescriptorBuilder {
@InterfaceAudience.Private
public static final String ENCRYPTION_KEY = "ENCRYPTION_KEY";
private static final Bytes ENCRYPTION_KEY_BYTES = new Bytes(Bytes.toBytes(ENCRYPTION_KEY));
+ @InterfaceAudience.Private
+ public static final String ENCRYPTION_KEY_NAMESPACE = "ENCRYPTION_KEY_NAMESPACE";
+ private static final Bytes ENCRYPTION_KEY_NAMESPACE_BYTES =
+ new Bytes(Bytes.toBytes(ENCRYPTION_KEY_NAMESPACE));
private static final boolean DEFAULT_MOB = false;
@InterfaceAudience.Private
@@ -320,6 +324,7 @@ public static Map<String, String> getDefaultValues() {
DEFAULT_VALUES.keySet().forEach(s -> RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(s))));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY)));
+ RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(ENCRYPTION_KEY_NAMESPACE)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(IS_MOB)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(MOB_THRESHOLD)));
RESERVED_KEYWORDS.add(new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)));
@@ -522,6 +527,11 @@ public ColumnFamilyDescriptorBuilder setEncryptionKey(final byte[] value) {
return this;
}
+ public ColumnFamilyDescriptorBuilder setEncryptionKeyNamespace(final String value) {
+ desc.setEncryptionKeyNamespace(value);
+ return this;
+ }
+
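+ // Usage sketch (illustrative family and namespace values): pin a family to a
+ // managed key namespace alongside the cipher type, e.g.
+ // ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
+ // .setEncryptionType("AES")
+ // .setEncryptionKeyNamespace("table1/cf1")
+ // .build();
+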
public ColumnFamilyDescriptorBuilder setEncryptionType(String value) {
desc.setEncryptionType(value);
return this;
@@ -1337,6 +1347,20 @@ public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) {
return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes));
}
+ @Override
+ public String getEncryptionKeyNamespace() {
+ return getStringOrDefault(ENCRYPTION_KEY_NAMESPACE_BYTES, Function.identity(), null);
+ }
+
+ /**
+ * Set the encryption key namespace attribute for the family
+ * @param keyNamespace the key namespace, or null to remove existing setting
+ * @return this (for chained invocation)
+ */
+ public ModifyableColumnFamilyDescriptor setEncryptionKeyNamespace(String keyNamespace) {
+ return setValue(ENCRYPTION_KEY_NAMESPACE_BYTES, keyNamespace);
+ }
+
@Override
public long getMobThreshold() {
return getStringOrDefault(MOB_THRESHOLD_BYTES, Long::valueOf, DEFAULT_MOB_THRESHOLD);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 2dca4f7e452d..73637f0cd20e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1288,6 +1288,14 @@ public enum OperationStatusCode {
public static final String CRYPTO_KEYPROVIDER_PARAMETERS_KEY =
"hbase.crypto.keyprovider.parameters";
+ /** Configuration key for the managed crypto key provider, a class name */
+ public static final String CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY =
+ "hbase.crypto.managed.keyprovider";
+
+ /** Configuration key for the managed crypto key provider parameters */
+ public static final String CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY =
+ "hbase.crypto.managed.keyprovider.parameters";
+
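+ // Illustrative wiring (example values, not defaults):
+ // hbase.crypto.managed.keyprovider =
+ // org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider
+ // hbase.crypto.managed.keyprovider.parameters =
+ // jceks:///etc/hbase/keystore.jceks?password=changeit
+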
/** Configuration key for the name of the master key for the cluster, a string */
public static final String CRYPTO_MASTERKEY_NAME_CONF_KEY = "hbase.crypto.master.key.name";
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 91af77361a0e..e8d965adebba 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -29,6 +29,7 @@
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiFunction;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
@@ -556,32 +557,45 @@ public static CipherProvider getCipherProvider(Configuration conf) {
}
}
- static final Map<Pair<String, String>, KeyProvider> keyProviderCache = new ConcurrentHashMap<>();
+ static final Map<Pair<String, String>, Object> keyProviderCache = new ConcurrentHashMap<>();
- public static KeyProvider getKeyProvider(Configuration conf) {
- String providerClassName =
- conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName());
- String providerParameters = conf.get(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "");
- try {
- Pair<String, String> providerCacheKey = new Pair<>(providerClassName, providerParameters);
- KeyProvider provider = keyProviderCache.get(providerCacheKey);
- if (provider != null) {
- return provider;
- }
- provider = (KeyProvider) ReflectionUtils
- .newInstance(getClassLoaderForClass(KeyProvider.class).loadClass(providerClassName), conf);
- provider.init(providerParameters);
- if (provider instanceof ManagedKeyProvider) {
- ((ManagedKeyProvider) provider).initConfig(conf);
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Installed " + providerClassName + " into key provider cache");
+ private static Object createProvider(final Configuration conf, String classNameKey,
+ String parametersKey, Class<?> defaultProviderClass, ClassLoader classLoaderForClass,
+ BiFunction<Object, String, Void> initFunction) {
+ String providerClassName = conf.get(classNameKey, defaultProviderClass.getName());
+ String providerParameters = conf.get(parametersKey, "");
+ Pair<String, String> providerCacheKey = new Pair<>(providerClassName, providerParameters);
+ Object provider = keyProviderCache.get(providerCacheKey);
+ if (provider == null) {
+ try {
+ provider =
+ ReflectionUtils.newInstance(classLoaderForClass.loadClass(providerClassName), conf);
+ initFunction.apply(provider, providerParameters);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
}
keyProviderCache.put(providerCacheKey, provider);
- return provider;
- } catch (Exception e) {
- throw new RuntimeException(e);
+ LOG.debug("Installed " + providerClassName + " into key provider cache");
}
+ return provider;
+ }
+
+ public static KeyProvider getKeyProvider(final Configuration conf) {
+ return (KeyProvider) createProvider(conf, HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
+ HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, KeyStoreKeyProvider.class,
+ getClassLoaderForClass(KeyProvider.class), (provider, providerParameters) -> {
+ ((KeyProvider) provider).init(providerParameters);
+ return null;
+ });
+ }
+
+ public static ManagedKeyProvider getManagedKeyProvider(final Configuration conf) {
+ return (ManagedKeyProvider) createProvider(conf, HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY, ManagedKeyProvider.class,
+ getClassLoaderForClass(ManagedKeyProvider.class), (provider, providerParameters) -> {
+ ((ManagedKeyProvider) provider).initConfig(conf, providerParameters);
+ return null;
+ });
}
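+
+ // Usage sketch: both provider kinds are memoized in keyProviderCache, keyed
+ // by (provider class name, parameters), so repeated calls with the same
+ // configuration return the same instance:
+ // KeyProvider kp = Encryption.getKeyProvider(conf);
+ // ManagedKeyProvider mkp = Encryption.getManagedKeyProvider(conf);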
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
index 512f78a1f9f5..c3adc5867bd1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
@@ -25,16 +25,16 @@
/**
* Interface for key providers of managed keys. Defines methods for generating and managing managed
- * keys, as well as handling key storage and retrieval. The interface extends the basic
- * {@link KeyProvider} interface with additional methods for working with managed keys.
+ * keys, as well as handling key storage and retrieval.
*/
@InterfaceAudience.Public
-public interface ManagedKeyProvider extends KeyProvider {
+public interface ManagedKeyProvider {
/**
* Initialize the provider with the given configuration.
- * @param conf Hadoop configuration
+ * @param conf Hadoop configuration
+ * @param providerParameters provider parameters
*/
- void initConfig(Configuration conf);
+ void initConfig(Configuration conf, String providerParameters);
/**
* Retrieve the system key using the given system identifier.
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
index 74f892f7ad89..15e49bd692e4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
@@ -31,6 +31,7 @@
public class ManagedKeyStoreKeyProvider extends KeyStoreKeyProvider implements ManagedKeyProvider {
public static final String KEY_METADATA_ALIAS = "KeyAlias";
public static final String KEY_METADATA_CUST = "KeyCustodian";
+ public static final String KEY_METADATA_NAMESPACE = "KeyNamespace";
private static final java.lang.reflect.Type KEY_METADATA_TYPE =
new TypeToken<Map<String, String>>() {
@@ -39,8 +40,11 @@ public class ManagedKeyStoreKeyProvider extends KeyStoreKeyProvider implements M
private Configuration conf;
@Override
- public void initConfig(Configuration conf) {
+ public void initConfig(Configuration conf, String providerParameters) {
this.conf = conf;
+ if (providerParameters != null) {
+ super.init(providerParameters);
+ }
}
@Override
@@ -56,8 +60,8 @@ public ManagedKeyData getSystemKey(byte[] clusterId) {
throw new RuntimeException("Unable to find system key with alias: " + systemKeyAlias);
}
// Encode clusterId too for consistency with that of key custodian.
- String keyMetadata =
- generateKeyMetadata(systemKeyAlias, ManagedKeyProvider.encodeToStr(clusterId));
+ String keyMetadata = generateKeyMetadata(systemKeyAlias,
+ ManagedKeyProvider.encodeToStr(clusterId), ManagedKeyData.KEY_SPACE_GLOBAL);
return new ManagedKeyData(clusterId, ManagedKeyData.KEY_SPACE_GLOBAL, key,
ManagedKeyState.ACTIVE, keyMetadata);
}
@@ -66,9 +70,25 @@ public ManagedKeyData getSystemKey(byte[] clusterId) {
public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace) throws IOException {
checkConfig();
String encodedCust = ManagedKeyProvider.encodeToStr(key_cust);
- String aliasConfKey =
- HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + "." + "alias";
- String keyMetadata = generateKeyMetadata(conf.get(aliasConfKey, null), encodedCust);
+
+ // Handle null key_namespace by defaulting to global namespace
+ if (key_namespace == null) {
+ key_namespace = ManagedKeyData.KEY_SPACE_GLOBAL;
+ }
+
+ // Get alias configuration for the specific custodian+namespace combination
+ String aliasConfKey = buildAliasConfKey(encodedCust, key_namespace);
+ String alias = conf.get(aliasConfKey, null);
+
+ // Generate metadata with actual alias (used for both success and failure cases)
+ String keyMetadata = generateKeyMetadata(alias, encodedCust, key_namespace);
+
+ // If no alias is configured for this custodian+namespace combination, treat as key not found
+ if (alias == null) {
+ return new ManagedKeyData(key_cust, key_namespace, null, ManagedKeyState.FAILED, keyMetadata);
+ }
+
+ // An alias is configured; resolve the actual key through its metadata.
return unwrapKey(keyMetadata, null);
}
@@ -77,17 +97,21 @@ public ManagedKeyData unwrapKey(String keyMetadataStr, byte[] wrappedKey) throws
Map<String, String> keyMetadata =
GsonUtil.getDefaultInstance().fromJson(keyMetadataStr, KEY_METADATA_TYPE);
String encodedCust = keyMetadata.get(KEY_METADATA_CUST);
- String activeStatusConfKey =
- HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + ".active";
+ String namespace = keyMetadata.get(KEY_METADATA_NAMESPACE);
+ if (namespace == null) {
+ // For backwards compatibility, default to global namespace
+ namespace = ManagedKeyData.KEY_SPACE_GLOBAL;
+ }
+ String activeStatusConfKey = buildActiveStatusConfKey(encodedCust, namespace);
boolean isActive = conf.getBoolean(activeStatusConfKey, true);
byte[] key_cust = ManagedKeyProvider.decodeToBytes(encodedCust);
String alias = keyMetadata.get(KEY_METADATA_ALIAS);
Key key = alias != null ? getKey(alias) : null;
if (key != null) {
- return new ManagedKeyData(key_cust, ManagedKeyData.KEY_SPACE_GLOBAL, key,
+ return new ManagedKeyData(key_cust, namespace, key,
isActive ? ManagedKeyState.ACTIVE : ManagedKeyState.INACTIVE, keyMetadataStr);
}
- return new ManagedKeyData(key_cust, ManagedKeyData.KEY_SPACE_GLOBAL, null,
+ return new ManagedKeyData(key_cust, namespace, null,
isActive ? ManagedKeyState.FAILED : ManagedKeyState.DISABLED, keyMetadataStr);
}
@@ -98,9 +122,24 @@ private void checkConfig() {
}
public static String generateKeyMetadata(String aliasName, String encodedCust) {
- Map<String, String> metadata = new HashMap<>(2);
+ return generateKeyMetadata(aliasName, encodedCust, ManagedKeyData.KEY_SPACE_GLOBAL);
+ }
+
+ public static String generateKeyMetadata(String aliasName, String encodedCust, String namespace) {
+ Map<String, String> metadata = new HashMap<>(3);
metadata.put(KEY_METADATA_ALIAS, aliasName);
metadata.put(KEY_METADATA_CUST, encodedCust);
+ metadata.put(KEY_METADATA_NAMESPACE, namespace);
return GsonUtil.getDefaultInstance().toJson(metadata, HashMap.class);
}
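+
+ // Example output for illustrative inputs ("cust1-alias", "Y3VzdDE=", "*");
+ // field order may vary since Gson serializes a HashMap:
+ // {"KeyAlias":"cust1-alias","KeyCustodian":"Y3VzdDE=","KeyNamespace":"*"}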
+
+ private String buildAliasConfKey(String encodedCust, String namespace) {
+ return HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + "." + namespace
+ + ".alias";
+ }
+
+ private String buildActiveStatusConfKey(String encodedCust, String namespace) {
+ return HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + "." + namespace
+ + ".active";
+ }
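+
+ // Example (illustrative values): for encodedCust "Y3VzdDE=" and namespace
+ // "table2" these helpers yield
+ // <CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX>Y3VzdDE=.table2.alias and
+ // <CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX>Y3VzdDE=.table2.active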
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java
index 39f460e062ae..0b85f1d76de1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/MockAesKeyProvider.java
@@ -50,7 +50,11 @@ public Key[] getKeys(String[] aliases) {
if (keys.containsKey(aliases[i])) {
result[i] = keys.get(aliases[i]);
} else {
- result[i] = new SecretKeySpec(Encryption.hash128(aliases[i]), "AES");
+ // Without caching, key generation must stay deterministic so repeated lookups
+ // for the same alias agree; cached keys can be randomized with a timestamp
+ // since later lookups are served from the cache.
+ result[i] = new SecretKeySpec(
+ Encryption.hash128(
+ cacheKeys ? aliases[i] + "-" + String.valueOf(System.currentTimeMillis()) : aliases[i]),
+ "AES");
if (cacheKeys) {
keys.put(aliases[i], result[i]);
}
@@ -58,4 +62,8 @@ public Key[] getKeys(String[] aliases) {
}
return result;
}
+
+ public void clearKeys() {
+ keys.clear();
+ }
}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java
index 3a8fb3d32464..f02979cd9893 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeymetaTestUtils.java
@@ -116,6 +116,14 @@ private KeymetaTestUtils() {
public static void addEntry(Configuration conf, int keyLen, KeyStore store, String alias,
String custodian, boolean withPasswordOnAlias, Map<Bytes, Bytes> cust2key,
Map<Bytes, String> cust2alias, Properties passwordFileProps) throws Exception {
+ addEntry(conf, keyLen, store, alias, custodian, withPasswordOnAlias, cust2key, cust2alias,
+ passwordFileProps, ManagedKeyData.KEY_SPACE_GLOBAL);
+ }
+
+ public static void addEntry(Configuration conf, int keyLen, KeyStore store, String alias,
+ String custodian, boolean withPasswordOnAlias, Map<Bytes, Bytes> cust2key,
+ Map<Bytes, String> cust2alias, Properties passwordFileProps, String namespace)
+ throws Exception {
Preconditions.checkArgument(keyLen == 256 || keyLen == 128, "Key length must be 256 or 128");
byte[] key =
MessageDigest.getInstance(keyLen == 256 ? "SHA-256" : "MD5").digest(Bytes.toBytes(alias));
@@ -124,8 +132,18 @@ public static void addEntry(Configuration conf, int keyLen, KeyStore store, Stri
store.setEntry(alias, new KeyStore.SecretKeyEntry(new SecretKeySpec(key, "AES")),
new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0]));
String encCust = Base64.getEncoder().encodeToString(custodian.getBytes());
- String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + "." + "alias";
- conf.set(confKey, alias);
+
+ // Use new format: PREFIX.{encodedCust}.{namespace}.alias
+ // For global namespace use "*", for custom namespace use actual namespace name
+ String namespaceKey = ManagedKeyData.KEY_SPACE_GLOBAL.equals(namespace) ? "*" : namespace;
+ String aliasConfKey =
+ HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + "." + namespaceKey + ".alias";
+ String activeStatusConfKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + "."
+ + namespaceKey + ".active";
+
+ conf.set(aliasConfKey, alias);
+ conf.setBoolean(activeStatusConfKey, true);
+
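+ // e.g. for custodian "cust1" (encoded "Y3VzdDE=") and namespace "table2" this
+ // sets, with <prefix> being CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX:
+ // <prefix>Y3VzdDE=.table2.alias = <alias>
+ // <prefix>Y3VzdDE=.table2.active = true
+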
if (passwordFileProps != null) {
passwordFileProps.setProperty(alias, PASSWORD);
}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
index 99c9c132d7d4..6782a7d11636 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
@@ -45,8 +45,8 @@ public class MockManagedKeyProvider extends MockAesKeyProvider implements Manage
private String systemKeyAlias = "default_system_key_alias";
@Override
- public void initConfig(Configuration conf) {
- // NO-OP
+ public void initConfig(Configuration conf, String providerParameters) {
+ super.init(providerParameters);
}
@Override
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyProvider.java
index 741cf05744d8..14718ddfc44e 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyProvider.java
@@ -55,4 +55,18 @@ public void testTestProvider() {
key.getEncoded().length);
}
+ @Test
+ public void testManagedKeyProvider() {
+ Configuration conf = HBaseConfiguration.create();
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
+ ManagedKeyProvider provider = Encryption.getManagedKeyProvider(conf);
+ assertNotNull("Null returned for managed provider", provider);
+ assertTrue("Provider is not the expected type", provider instanceof MockManagedKeyProvider);
+
+ // Test that it's cached
+ ManagedKeyProvider provider2 = Encryption.getManagedKeyProvider(conf);
+ assertTrue("Provider should be cached and same instance", provider == provider2);
+ }
+
}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
index 405c5731be94..7a003f2943ed 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyProvider.java
@@ -29,12 +29,14 @@
import java.security.KeyStore;
import java.util.Arrays;
import java.util.Base64;
+import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -57,26 +59,53 @@
public class TestManagedKeyProvider {
@RunWith(Parameterized.class)
@Category({ MiscTests.class, SmallTests.class })
- public static class TestManagedKeyStoreKeyProvider extends TestKeyStoreKeyProvider {
+ public static class TestManagedKeyStoreKeyProvider {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestManagedKeyStoreKeyProvider.class);
private static final String SYSTEM_KEY_ALIAS = "system-alias";
+ static final HBaseCommonTestingUtil TEST_UTIL = new HBaseCommonTestingUtil();
+ static byte[] KEY;
+
+ @Parameterized.Parameter(0)
+ public boolean withPasswordOnAlias;
+ @Parameterized.Parameter(1)
+ public boolean withPasswordFile;
+
+ @Parameterized.Parameters(name = "withPasswordOnAlias={0} withPasswordFile={1}")
+ public static Collection<Object[]> parameters() {
+ return Arrays
+ .asList(new Object[][] { { Boolean.TRUE, Boolean.TRUE }, { Boolean.TRUE, Boolean.FALSE },
+ { Boolean.FALSE, Boolean.TRUE }, { Boolean.FALSE, Boolean.FALSE }, });
+ }
+
+ // TestManagedKeyStoreKeyProvider specific fields
private Configuration conf = HBaseConfiguration.create();
private int nCustodians = 2;
private ManagedKeyProvider managedKeyProvider;
private Map<Bytes, Bytes> cust2key = new HashMap<>();
private Map<Bytes, String> cust2alias = new HashMap<>();
+ // Iteration order matters in the custom-namespace tests below, so use insertion-ordered maps
+ private Map<Bytes, Bytes> namespaceCust2key = new LinkedHashMap<>();
+ private Map<Bytes, String> namespaceCust2alias = new LinkedHashMap<>();
private String clusterId;
private byte[] systemKey;
@Before
public void setUp() throws Exception {
- super.setUp();
- managedKeyProvider = (ManagedKeyProvider) provider;
- managedKeyProvider.initConfig(conf);
+ String providerParams = KeymetaTestUtils.setupTestKeyStore(TEST_UTIL, withPasswordOnAlias,
+ withPasswordFile, store -> {
+ Properties passwdProps = new Properties();
+ try {
+ addCustomEntries(store, passwdProps);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return passwdProps;
+ });
+ managedKeyProvider = (ManagedKeyProvider) createProvider();
+ managedKeyProvider.initConfig(conf, providerParams);
}
protected KeyProvider createProvider() {
@@ -84,7 +113,7 @@ protected KeyProvider createProvider() {
}
protected void addCustomEntries(KeyStore store, Properties passwdProps) throws Exception {
- super.addCustomEntries(store, passwdProps);
+ // TestManagedKeyStoreKeyProvider specific entries
for (int i = 0; i < nCustodians; ++i) {
String custodian = "custodian+ " + i;
String alias = custodian + "-alias";
@@ -92,6 +121,17 @@ protected void addCustomEntries(KeyStore store, Properties passwdProps) throws E
cust2alias, passwdProps);
}
+ // Add custom namespace entries for testing
+ String customNamespace1 = "table1/cf1";
+ String customNamespace2 = "table2";
+ for (int i = 0; i < 2; ++i) {
+ String custodian = "ns-custodian+ " + i;
+ String alias = custodian + "-alias";
+ String namespace = (i == 0) ? customNamespace1 : customNamespace2;
+ KeymetaTestUtils.addEntry(conf, 256, store, alias, custodian, withPasswordOnAlias,
+ namespaceCust2key, namespaceCust2alias, passwdProps, namespace);
+ }
+
clusterId = UUID.randomUUID().toString();
KeymetaTestUtils.addEntry(conf, 256, store, SYSTEM_KEY_ALIAS, clusterId, withPasswordOnAlias,
cust2key, cust2alias, passwdProps);
@@ -104,7 +144,7 @@ protected void addCustomEntries(KeyStore store, Properties passwdProps) throws E
@Test
public void testMissingConfig() throws Exception {
- managedKeyProvider.initConfig(null);
+ managedKeyProvider.initConfig(null, null);
RuntimeException ex =
assertThrows(RuntimeException.class, () -> managedKeyProvider.getSystemKey(null));
assertEquals("initConfig is not called or config is null", ex.getMessage());
@@ -133,7 +173,8 @@ public void testGetGlobalCustodianKey() throws Exception {
public void testGetInactiveKey() throws Exception {
Bytes firstCust = cust2key.keySet().iterator().next();
String encCust = Base64.getEncoder().encodeToString(firstCust.get());
- conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + ".active", "false");
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + ".*.active",
+ "false");
ManagedKeyData keyData =
managedKeyProvider.getManagedKey(firstCust.get(), ManagedKeyData.KEY_SPACE_GLOBAL);
assertNotNull(keyData);
@@ -154,12 +195,15 @@ public void testGetInvalidKey() throws Exception {
public void testGetDisabledKey() throws Exception {
byte[] invalidCust = new byte[] { 1, 2, 3 };
String invalidCustEnc = ManagedKeyProvider.encodeToStr(invalidCust);
- conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".active",
+ // For disabled keys, we need to configure both alias and active status
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".*.alias",
+ "disabled-alias");
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".*.active",
"false");
ManagedKeyData keyData =
managedKeyProvider.getManagedKey(invalidCust, ManagedKeyData.KEY_SPACE_GLOBAL);
assertNotNull(keyData);
- assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidCust, null);
+ assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidCust, "disabled-alias");
}
@Test
@@ -193,19 +237,239 @@ public void testUnwrapDisabledKey() throws Exception {
String invalidAlias = "invalidAlias";
byte[] invalidCust = new byte[] { 1, 2, 3 };
String invalidCustEnc = ManagedKeyProvider.encodeToStr(invalidCust);
- conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".active",
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidCustEnc + ".*.active",
"false");
- String invalidMetadata =
- ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, invalidCustEnc);
+ String invalidMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias,
+ invalidCustEnc, ManagedKeyData.KEY_SPACE_GLOBAL);
ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null);
assertNotNull(keyData);
assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidCust, invalidAlias);
}
+ @Test
+ public void testGetManagedKeyWithCustomNamespace() throws Exception {
+ String customNamespace1 = "table1/cf1";
+ String customNamespace2 = "table2";
+ int index = 0;
+ for (Bytes cust : namespaceCust2key.keySet()) {
+ String namespace = (index == 0) ? customNamespace1 : customNamespace2;
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(cust.get(), namespace);
+ assertKeyDataWithNamespace(keyData, ManagedKeyState.ACTIVE,
+ namespaceCust2key.get(cust).get(), cust.get(), namespaceCust2alias.get(cust), namespace);
+ index++;
+ }
+ }
+
+ @Test
+ public void testGetManagedKeyWithCustomNamespaceInactive() throws Exception {
+ Bytes firstCust = namespaceCust2key.keySet().iterator().next();
+ String customNamespace = "table1/cf1";
+ String encCust = Base64.getEncoder().encodeToString(firstCust.get());
+ // Set active status to false using the new namespace-aware format
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encCust + "." + customNamespace
+ + ".active", "false");
+
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(firstCust.get(), customNamespace);
+ assertNotNull(keyData);
+ assertKeyDataWithNamespace(keyData, ManagedKeyState.INACTIVE,
+ namespaceCust2key.get(firstCust).get(), firstCust.get(), namespaceCust2alias.get(firstCust),
+ customNamespace);
+ }
+
+ @Test
+ public void testGetManagedKeyWithInvalidCustomNamespace() throws Exception {
+ byte[] invalidCustBytes = "invalid".getBytes();
+ String customNamespace = "invalid/namespace";
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(invalidCustBytes, customNamespace);
+ assertNotNull(keyData);
+ assertKeyDataWithNamespace(keyData, ManagedKeyState.FAILED, null, invalidCustBytes, null,
+ customNamespace);
+ }
+
+ @Test
+ public void testNamespaceMismatchReturnsFailedKey() throws Exception {
+ // Use existing namespace key but request with different namespace
+ Bytes firstCust = namespaceCust2key.keySet().iterator().next();
+ String requestedNamespace = "table2/cf2"; // Different namespace from what's configured!
+
+ // Request key with different namespace - should fail
+ ManagedKeyData keyData =
+ managedKeyProvider.getManagedKey(firstCust.get(), requestedNamespace);
+
+ assertNotNull(keyData);
+ assertEquals(ManagedKeyState.FAILED, keyData.getKeyState());
+ assertNull(keyData.getTheKey());
+ assertEquals(requestedNamespace, keyData.getKeyNamespace());
+ assertEquals(firstCust, keyData.getKeyCustodian());
+ }
+
+ @Test
+ public void testNamespaceMatchReturnsKey() throws Exception {
+ // Use existing namespace key and request with matching namespace
+ Bytes firstCust = namespaceCust2key.keySet().iterator().next();
+ String configuredNamespace = "table1/cf1"; // This matches our test setup
+
+ ManagedKeyData keyData =
+ managedKeyProvider.getManagedKey(firstCust.get(), configuredNamespace);
+
+ assertNotNull(keyData);
+ assertEquals(ManagedKeyState.ACTIVE, keyData.getKeyState());
+ assertNotNull(keyData.getTheKey());
+ assertEquals(configuredNamespace, keyData.getKeyNamespace());
+ }
+
+ @Test
+ public void testGlobalKeyAccessedWithWrongNamespaceFails() throws Exception {
+ // Get a global key (one from cust2key)
+ Bytes globalCust = cust2key.keySet().iterator().next();
+
+ // Try to access it with a custom namespace - should fail
+ String wrongNamespace = "table1/cf1";
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(globalCust.get(), wrongNamespace);
+
+ assertNotNull(keyData);
+ assertEquals(ManagedKeyState.FAILED, keyData.getKeyState());
+ assertNull(keyData.getTheKey());
+ assertEquals(wrongNamespace, keyData.getKeyNamespace());
+ }
+
+ @Test
+ public void testNamespaceKeyAccessedAsGlobalFails() throws Exception {
+ // Get a namespace-specific key
+ Bytes namespaceCust = namespaceCust2key.keySet().iterator().next();
+
+ // Try to access it as global - should fail
+ ManagedKeyData keyData =
+ managedKeyProvider.getManagedKey(namespaceCust.get(), ManagedKeyData.KEY_SPACE_GLOBAL);
+
+ assertNotNull(keyData);
+ assertEquals(ManagedKeyState.FAILED, keyData.getKeyState());
+ assertNull(keyData.getTheKey());
+ assertEquals(ManagedKeyData.KEY_SPACE_GLOBAL, keyData.getKeyNamespace());
+ }
+
+ @Test
+ public void testMultipleNamespacesForSameCustodianFail() throws Exception {
+ // Use existing namespace custodian
+ Bytes namespaceCust = namespaceCust2key.keySet().iterator().next();
+ String configuredNamespace = "table1/cf1"; // This matches our test setup
+ String differentNamespace = "table2";
+
+ // Verify we can access with configured namespace
+ ManagedKeyData keyData1 =
+ managedKeyProvider.getManagedKey(namespaceCust.get(), configuredNamespace);
+ assertEquals(ManagedKeyState.ACTIVE, keyData1.getKeyState());
+ assertEquals(configuredNamespace, keyData1.getKeyNamespace());
+
+ // But accessing with different namespace should fail (even though it's the same custodian)
+ ManagedKeyData keyData2 =
+ managedKeyProvider.getManagedKey(namespaceCust.get(), differentNamespace);
+ assertEquals(ManagedKeyState.FAILED, keyData2.getKeyState());
+ assertEquals(differentNamespace, keyData2.getKeyNamespace());
+ }
+
+ @Test
+ public void testNullNamespaceDefaultsToGlobal() throws Exception {
+ // Get a global key (one from cust2key)
+ Bytes globalCust = cust2key.keySet().iterator().next();
+
+ // Call getManagedKey with null namespace - should default to global and succeed
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(globalCust.get(), null);
+
+ assertNotNull(keyData);
+ assertEquals(ManagedKeyState.ACTIVE, keyData.getKeyState());
+ assertNotNull(keyData.getTheKey());
+ assertEquals(ManagedKeyData.KEY_SPACE_GLOBAL, keyData.getKeyNamespace());
+ }
+
+ @Test
+ public void testFailedKeyContainsProperMetadataWithAlias() throws Exception {
+ // Use existing namespace key but request with different namespace
+ Bytes firstCust = namespaceCust2key.keySet().iterator().next();
+ String wrongNamespace = "wrong/namespace";
+
+ // Request with wrong namespace - should fail but have proper metadata
+ ManagedKeyData keyData = managedKeyProvider.getManagedKey(firstCust.get(), wrongNamespace);
+
+ assertNotNull(keyData);
+ assertEquals(ManagedKeyState.FAILED, keyData.getKeyState());
+ assertNull(keyData.getTheKey());
+ assertEquals(wrongNamespace, keyData.getKeyNamespace());
+
+ // Verify the failed key metadata - should have null alias since wrong namespace is requested
+ // This is the correct security behavior - don't leak alias information across namespaces
+ String expectedEncodedCust = Base64.getEncoder().encodeToString(firstCust.get());
+ assertMetadataMatches(keyData.getKeyMetadata(), null, expectedEncodedCust, wrongNamespace);
+ }
+
+ @Test
+ public void testBackwardsCompatibilityForGenerateKeyMetadata() {
+ String alias = "test-alias";
+ String encodedCust = "dGVzdA==";
+
+ // Test the old method (should default to global namespace)
+ String oldMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(alias, encodedCust);
+
+ // Test the new method with explicit global namespace
+ String newMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(alias, encodedCust,
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+
+ assertEquals(
+ "Old and new metadata generation should produce same result for global namespace",
+ oldMetadata, newMetadata);
+
+ // Verify both contain the namespace field
+ Map<String, Object> oldMap = parseKeyMetadata(oldMetadata);
+ Map<String, Object> newMap = parseKeyMetadata(newMetadata);
+
+ assertEquals(ManagedKeyData.KEY_SPACE_GLOBAL,
+ oldMap.get(ManagedKeyStoreKeyProvider.KEY_METADATA_NAMESPACE));
+ assertEquals(ManagedKeyData.KEY_SPACE_GLOBAL,
+ newMap.get(ManagedKeyStoreKeyProvider.KEY_METADATA_NAMESPACE));
+ }
+
private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState, byte[] key,
byte[] custBytes, String alias) throws Exception {
+ assertKeyDataWithNamespace(keyData, expKeyState, key, custBytes, alias,
+ ManagedKeyData.KEY_SPACE_GLOBAL);
+ }
+
+ /**
+ * Helper method to parse key metadata JSON string into a Map
+ */
+ @SuppressWarnings("unchecked")
+ private Map<String, Object> parseKeyMetadata(String keyMetadataStr) {
+ return GsonUtil.getDefaultInstance().fromJson(keyMetadataStr, HashMap.class);
+ }
+
+ /**
+ * Helper method to assert metadata contents
+ */
+ private void assertMetadataContains(Map<String, Object> metadata, String expectedAlias,
+ String expectedEncodedCust, String expectedNamespace) {
+ assertNotNull("Metadata should not be null", metadata);
+ assertEquals("Metadata should contain expected alias", expectedAlias,
+ metadata.get(KEY_METADATA_ALIAS));
+ assertEquals("Metadata should contain expected encoded custodian", expectedEncodedCust,
+ metadata.get(KEY_METADATA_CUST));
+ assertEquals("Metadata should contain expected namespace", expectedNamespace,
+ metadata.get(ManagedKeyStoreKeyProvider.KEY_METADATA_NAMESPACE));
+ }
+
+ /**
+ * Helper method to parse and assert metadata contents in one call
+ */
+ private void assertMetadataMatches(String keyMetadataStr, String expectedAlias,
+ String expectedEncodedCust, String expectedNamespace) {
+ Map<String, Object> metadata = parseKeyMetadata(keyMetadataStr);
+ assertMetadataContains(metadata, expectedAlias, expectedEncodedCust, expectedNamespace);
+ }
+
+ private void assertKeyDataWithNamespace(ManagedKeyData keyData, ManagedKeyState expKeyState,
+ byte[] key, byte[] custBytes, String alias, String expectedNamespace) throws Exception {
assertNotNull(keyData);
assertEquals(expKeyState, keyData.getKeyState());
+ assertEquals(expectedNamespace, keyData.getKeyNamespace());
if (key == null) {
assertNull(keyData.getTheKey());
} else {
@@ -213,13 +477,12 @@ private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState,
assertEquals(key.length, keyBytes.length);
assertEquals(new Bytes(key), keyBytes);
}
- Map<String, Object> keyMetadata =
- GsonUtil.getDefaultInstance().fromJson(keyData.getKeyMetadata(), HashMap.class);
- assertNotNull(keyMetadata);
+
+ // Use helper method instead of duplicated parsing logic
+ String encodedCust = Base64.getEncoder().encodeToString(custBytes);
+ assertMetadataMatches(keyData.getKeyMetadata(), alias, encodedCust, expectedNamespace);
+
assertEquals(new Bytes(custBytes), keyData.getKeyCustodian());
- assertEquals(alias, keyMetadata.get(KEY_METADATA_ALIAS));
- assertEquals(Base64.getEncoder().encodeToString(custBytes),
- keyMetadata.get(KEY_METADATA_CUST));
assertEquals(keyData, managedKeyProvider.unwrapKey(keyData.getKeyMetadata(), null));
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
index 957c3c8f726d..6fbd177437fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java
@@ -22,7 +22,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.Encryption;
-import org.apache.hadoop.hbase.io.crypto.KeyProvider;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
import org.apache.hadoop.hbase.security.SecurityUtil;
@@ -73,17 +72,11 @@ protected Configuration getConfiguration() {
/**
* A utility method for getting the managed key provider.
- * @return the key provider
- * @throws RuntimeException if no provider is configured or if the configured provider is not an
- * instance of ManagedKeyProvider
+ * @return the managed key provider
+ * @throws RuntimeException if no provider is configured
*/
protected ManagedKeyProvider getKeyProvider() {
- KeyProvider provider = Encryption.getKeyProvider(getConfiguration());
- if (!(provider instanceof ManagedKeyProvider)) {
- throw new RuntimeException("KeyProvider: " + provider.getClass().getName()
- + " expected to be of type ManagedKeyProvider");
- }
- return (ManagedKeyProvider) provider;
+ return Encryption.getManagedKeyProvider(getConfiguration());
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 995f7fa6c47f..fde89d122e28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -81,7 +81,6 @@
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.quotas.RegionSizeStore;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -337,9 +336,8 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException {
return new StoreContext.Builder().withBlockSize(family.getBlocksize())
- .withEncryptionContext(SecurityUtil.createEncryptionContext(conf, family,
- region.getManagedKeyDataCache(), region.getSystemKeyCache(),
- KeyNamespaceUtil.constructKeyNamespace(region.getTableDescriptor(), family)))
+ .withEncryptionContext(SecurityUtil.createEncryptionContext(conf, region.getTableDescriptor(),
+ family, region.getManagedKeyDataCache(), region.getSystemKeyCache()))
.withBloomType(family.getBloomFilterType()).withCacheConfig(createCacheConf(family))
.withCellComparator(region.getTableDescriptor().isMetaTable() || conf
.getBoolean(HRegion.USE_META_CELL_COMPARATOR, HRegion.DEFAULT_USE_META_CELL_COMPARATOR)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
index 2e6e4cb4f933..3fe2937e4d6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
@@ -24,14 +24,18 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Security related generic utility methods.
@@ -39,6 +43,8 @@
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class SecurityUtil {
+ private static final Logger LOG = LoggerFactory.getLogger(SecurityUtil.class);
+
/**
* Get the user name from a principal
*/
@@ -61,74 +67,135 @@ public static String getPrincipalWithoutRealm(final String principal) {
/**
* Helper to create an encryption context with the current encryption key, suitable for writes.
* @param conf The current configuration.
+ * @param tableDescriptor The table descriptor.
* @param family The current column descriptor.
* @param managedKeyDataCache The managed key data cache.
* @param systemKeyCache The system key cache.
- * @param keyNamespace The key namespace.
* @return The created encryption context.
* @throws IOException if an encryption key for the column cannot be unwrapped
* @throws IllegalStateException in case of encryption related configuration errors
*/
public static Encryption.Context createEncryptionContext(Configuration conf,
- ColumnFamilyDescriptor family, ManagedKeyDataCache managedKeyDataCache,
- SystemKeyCache systemKeyCache, String keyNamespace) throws IOException {
+ TableDescriptor tableDescriptor, ColumnFamilyDescriptor family,
+ ManagedKeyDataCache managedKeyDataCache, SystemKeyCache systemKeyCache) throws IOException {
Encryption.Context cryptoContext = Encryption.Context.NONE;
+ boolean isKeyManagementEnabled = isKeyManagementEnabled(conf);
String cipherName = family.getEncryptionType();
+ String keyNamespace = null; // Will be set by fallback logic
+ LOG.debug("Creating encryption context for table: {} and column family: {}",
+ tableDescriptor.getTableName().getNameAsString(), family.getNameAsString());
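+ // The key/KEK selection below falls into three scenarios:
+ // 1. The family carries its own wrapped key (ENCRYPTION_KEY attribute).
+ // 2. Key management is enabled and an active managed key exists in some namespace.
+ // 3. Neither applies: a random DEK is generated, with the latest STK as KEK when key
+ // management is enabled.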
if (cipherName != null) {
if (!Encryption.isEncryptionEnabled(conf)) {
throw new IllegalStateException("Encryption for family '" + family.getNameAsString()
+ "' configured with type '" + cipherName + "' but the encryption feature is disabled");
}
+ if (isKeyManagementEnabled && systemKeyCache == null) {
+ throw new IOException("Key management is enabled, but SystemKeyCache is null");
+ }
Cipher cipher = null;
Key key = null;
- ManagedKeyData kekKeyData = null;
- if (isKeyManagementEnabled(conf)) {
- kekKeyData = managedKeyDataCache.getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES,
- keyNamespace);
- // If no active key found in the specific namespace, try the global namespace
- if (kekKeyData == null) {
- kekKeyData = managedKeyDataCache.getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES,
- ManagedKeyData.KEY_SPACE_GLOBAL);
- keyNamespace = ManagedKeyData.KEY_SPACE_GLOBAL;
- }
- if (kekKeyData == null) {
- throw new IOException(
- "No active key found for custodian: " + ManagedKeyData.KEY_GLOBAL_CUSTODIAN
- + " in namespaces: " + keyNamespace + " and " + ManagedKeyData.KEY_SPACE_GLOBAL);
- }
- if (
- conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY,
- HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_DEFAULT_ENABLED)
- ) {
- cipher =
- getCipherIfValid(conf, cipherName, kekKeyData.getTheKey(), family.getNameAsString());
- } else {
- key = kekKeyData.getTheKey();
- kekKeyData = systemKeyCache.getLatestSystemKey();
+ ManagedKeyData kekKeyData =
+ isKeyManagementEnabled ? systemKeyCache.getLatestSystemKey() : null;
+
+ // Scenario 1: If family has a key, unwrap it and use that as DEK.
+ byte[] familyKeyBytes = family.getEncryptionKey();
+ if (familyKeyBytes != null) {
+ try {
+ if (isKeyManagementEnabled) {
+ // Scenario 1a: If key management is enabled, use STK for both unwrapping and KEK.
+ key = EncryptionUtil.unwrapKey(conf, null, familyKeyBytes, kekKeyData.getTheKey());
+ } else {
+ // Scenario 1b: If key management is disabled, unwrap the key using master key.
+ key = EncryptionUtil.unwrapKey(conf, familyKeyBytes);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Scenario 1: Use family key for namespace {} cipher: {} "
+ + "key management enabled: {}", keyNamespace, cipherName, isKeyManagementEnabled);
+ }
+ } catch (KeyException e) {
+ throw new IOException(e);
}
} else {
- byte[] keyBytes = family.getEncryptionKey();
- if (keyBytes != null) {
- // Family provides specific key material
- key = EncryptionUtil.unwrapKey(conf, keyBytes);
+ if (isKeyManagementEnabled) {
+ boolean localKeyGenEnabled =
+ conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_ENABLED_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_LOCAL_KEY_GEN_PER_FILE_DEFAULT_ENABLED);
+ // Implement 4-step fallback logic for key namespace resolution in the order of
+ // 1. CF KEY_NAMESPACE attribute
+ // 2. Constructed namespace
+ // 3. Table name
+ // 4. Global namespace
+ String[] candidateNamespaces = { family.getEncryptionKeyNamespace(),
+ KeyNamespaceUtil.constructKeyNamespace(tableDescriptor, family),
+ tableDescriptor.getTableName().getNameAsString(), ManagedKeyData.KEY_SPACE_GLOBAL };
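+ // e.g. for family "cf1" on table "table1" with no CF-level override, the probe order is
+ // typically: null (skipped), "table1/cf1", "table1", then the global key space.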
+
+ ManagedKeyData activeKeyData = null;
+ for (String candidate : candidateNamespaces) {
+ if (candidate != null) {
+ // Log the namespace candidate being probed for an active key
+ LOG.debug("Looking for active key in namespace {} for table: {} and column family: {}",
+ candidate, tableDescriptor.getTableName().getNameAsString(), family.getNameAsString());
+ activeKeyData = managedKeyDataCache
+ .getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, candidate);
+ if (activeKeyData != null) {
+ keyNamespace = candidate;
+ break;
+ }
+ }
+ }
+
+ // Scenario 2: There is an active key
+ if (activeKeyData != null) {
+ if (!localKeyGenEnabled) {
+ // Scenario 2a: Use active key as DEK and latest STK as KEK
+ key = activeKeyData.getTheKey();
+ } else {
+ // Scenario 2b: Use active key as KEK and generate local key as DEK
+ kekKeyData = activeKeyData;
+ // TODO: Use the active key as a seed to generate the local key instead of
+ // random generation
+ cipher = getCipherIfValid(conf, cipherName, activeKeyData.getTheKey(),
+ family.getNameAsString());
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Scenario 2: Use active key for namespace {} cipher: {} "
+ + "localKeyGenEnabled: {} for table: {} and column family: {}",
+ keyNamespace, cipherName, localKeyGenEnabled,
+ tableDescriptor.getTableName().getNameAsString(), family.getNameAsString(),
+ activeKeyData.getKeyNamespace());
+ }
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Scenario 3a: No active key found for table: {} and column family: {}",
+ tableDescriptor.getTableName().getNameAsString(), family.getNameAsString());
+ }
+ // Scenario 3a: Do nothing; a random key is generated as the DEK and the latest STK
+ // (already fetched above) serves as the KEK.
+ }
} else {
- cipher = getCipherIfValid(conf, cipherName, null, null);
+ // Scenario 3b: Do nothing; a random key is generated as the DEK and, since key
+ // management is disabled, no KEK is used.
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(
+ "Scenario 3b: Key management is disabled and no ENCRYPTION_KEY attribute "
+ + "set for table: {} and column family: {}",
+ tableDescriptor.getTableName().getNameAsString(), family.getNameAsString());
+ }
}
}
- if (key != null || cipher != null) {
- if (key == null) {
- // Family does not provide key material, create a random key
- key = cipher.getRandomKey();
- }
- if (cipher == null) {
- cipher = getCipherIfValid(conf, cipherName, key, family.getNameAsString());
- }
- cryptoContext = Encryption.newContext(conf);
- cryptoContext.setCipher(cipher);
- cryptoContext.setKey(key);
- cryptoContext.setKeyNamespace(keyNamespace);
- cryptoContext.setKEKData(kekKeyData);
+
+ if (cipher == null) {
+ cipher =
+ getCipherIfValid(conf, cipherName, key, key == null ? null : family.getNameAsString());
+ }
+ if (key == null) {
+ key = cipher.getRandomKey();
}
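+ // At this point a cipher and a DEK are always available; kekKeyData, when non-null,
+ // identifies the KEK associated with the context.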
+ cryptoContext = Encryption.newContext(conf);
+ cryptoContext.setCipher(cipher);
+ cryptoContext.setKey(key);
+ cryptoContext.setKeyNamespace(keyNamespace);
+ cryptoContext.setKEKData(kekKeyData);
}
return cryptoContext;
}
@@ -149,15 +216,38 @@ public static Encryption.Context createEncryptionContext(Configuration conf, Pat
ManagedKeyData kekKeyData = null;
byte[] keyBytes = trailer.getEncryptionKey();
Encryption.Context cryptoContext = Encryption.Context.NONE;
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating encryption context for path: {}", path);
+ }
// Check for any key material available
if (keyBytes != null) {
cryptoContext = Encryption.newContext(conf);
Key kek = null;
- // When the KEK medata is available, we will try to unwrap the encrypted key using the KEK,
- // otherwise we will use the system keys starting from the latest to the oldest.
- if (trailer.getKEKMetadata() != null) {
+
+ // When there is key material, determine the appropriate KEK
+ boolean isKeyManagementEnabled = isKeyManagementEnabled(conf);
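+ // A non-zero KEK checksum can only have been written with key management enabled, so
+ // reading such a file requires both the feature and the system key cache.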
+ if (((trailer.getKEKChecksum() != 0L) || isKeyManagementEnabled) && systemKeyCache == null) {
+ throw new IOException("SystemKeyCache can't be null when using key management feature");
+ }
+ if (trailer.getKEKChecksum() != 0L && !isKeyManagementEnabled) {
+ throw new IOException(
+ "Seeing newer trailer with KEK checksum, but key management is disabled");
+ }
+
+ // Try STK lookup first when a KEK checksum is present and the trailer carries no key
+ // namespace (a key namespace implies the KEK was a managed key, not an STK).
+ if (trailer.getKEKChecksum() != 0L && trailer.getKeyNamespace() == null) {
+ ManagedKeyData systemKeyData =
+ systemKeyCache.getSystemKeyByChecksum(trailer.getKEKChecksum());
+ if (systemKeyData != null) {
+ kek = systemKeyData.getTheKey();
+ kekKeyData = systemKeyData;
+ }
+ }
+
+ // If STK lookup failed or no checksum available, try managed key lookup using metadata
+ if (kek == null && trailer.getKEKMetadata() != null) {
if (managedKeyDataCache == null) {
- throw new IOException("Key management is enabled, but ManagedKeyDataCache is null");
+ throw new IOException("KEK metadata is available, but ManagedKeyDataCache is null");
}
Throwable cause = null;
try {
@@ -172,21 +262,17 @@ public static Encryption.Context createEncryptionContext(Configuration conf, Pat
"Failed to get key data for KEK metadata: " + trailer.getKEKMetadata(), cause);
}
kek = kekKeyData.getTheKey();
- } else {
- if (SecurityUtil.isKeyManagementEnabled(conf)) {
- if (systemKeyCache == null) {
- throw new IOException("Key management is enabled, but SystemKeyCache is null");
- }
- ManagedKeyData systemKeyData =
- systemKeyCache.getSystemKeyByChecksum(trailer.getKEKChecksum());
- if (systemKeyData == null) {
- throw new IOException(
- "Failed to get system key by checksum: " + trailer.getKEKChecksum());
- }
- kek = systemKeyData.getTheKey();
- kekKeyData = systemKeyData;
+ } else if (kek == null && isKeyManagementEnabled) {
+ // STK lookup failed or no checksum/metadata is available; fall back to the latest
+ // system key for backwards compatibility
+ ManagedKeyData systemKeyData = systemKeyCache.getLatestSystemKey();
+ if (systemKeyData == null) {
+ throw new IOException("Failed to get latest system key");
}
+ kek = systemKeyData.getTheKey();
+ kekKeyData = systemKeyData;
}
+
Key key;
if (kek != null) {
try {
@@ -202,6 +288,7 @@ public static Encryption.Context createEncryptionContext(Configuration conf, Pat
Cipher cipher = getCipherIfValid(conf, key.getAlgorithm(), key, null);
cryptoContext.setCipher(cipher);
cryptoContext.setKey(key);
+ cryptoContext.setKeyNamespace(trailer.getKeyNamespace());
cryptoContext.setKEKData(kekKeyData);
}
return cryptoContext;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
index 192343ae41d3..eb4d72c7745f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/EncryptionTest.java
@@ -28,7 +28,9 @@
import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider;
import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.security.SecurityUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -48,12 +50,23 @@ private EncryptionTest() {
* Check that the configured key provider can be loaded and initialized, or throw an exception.
*/
public static void testKeyProvider(final Configuration conf) throws IOException {
- String providerClassName =
- conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName());
+ boolean isKeyManagementEnabled = SecurityUtil.isKeyManagementEnabled(conf);
+ String providerClassName;
+ if (isKeyManagementEnabled) {
+ providerClassName = conf.get(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ ManagedKeyStoreKeyProvider.class.getName());
+ } else {
+ providerClassName =
+ conf.get(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyStoreKeyProvider.class.getName());
+ }
Boolean result = keyProviderResults.get(providerClassName);
if (result == null) {
try {
- Encryption.getKeyProvider(conf);
+ if (isKeyManagementEnabled) {
+ Encryption.getManagedKeyProvider(conf);
+ } else {
+ Encryption.getKeyProvider(conf);
+ }
keyProviderResults.put(providerClassName, true);
} catch (Exception e) { // most likely a RuntimeException
keyProviderResults.put(providerClassName, false);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
index 3053e72ecea7..c91539b7ed68 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
@@ -35,8 +35,8 @@ public ManagedKeyProviderInterceptor() {
}
@Override
- public void initConfig(Configuration conf) {
- spy.initConfig(conf);
+ public void initConfig(Configuration conf, String providerParameters) {
+ spy.initConfig(conf, providerParameters);
}
@Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
index f3b2e2ca1ade..3c337ce72131 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
@@ -19,34 +19,108 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
import org.junit.After;
import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ManagedKeyTestBase {
+ private static final Logger LOG = LoggerFactory.getLogger(ManagedKeyTestBase.class);
+
protected HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
@Before
public void setUp() throws Exception {
- TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
- getKeyProviderClass().getName());
- TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
- TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes",
- KeymetaServiceEndpoint.class.getName());
+ if (isWithKeyManagement()) {
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ getKeyProviderClass().getName());
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes",
+ KeymetaServiceEndpoint.class.getName());
+ }
+
+ // Start the minicluster if needed
+ if (isWithMiniClusterStart()) {
+ LOG.info("\n\nManagedKeyTestBase.setUp: Starting minicluster\n");
+ startMiniCluster();
+ LOG.info("\n\nManagedKeyTestBase.setUp: Minicluster successfully started\n");
+ }
+ }
- // Start the minicluster
+ protected void startMiniCluster() throws Exception {
+ startMiniCluster(getSystemTableNameToWaitFor());
+ }
+
+ protected void startMiniCluster(TableName tableNameToWaitFor) throws Exception {
TEST_UTIL.startMiniCluster(1);
+ waitForMasterInitialization(tableNameToWaitFor);
+ }
+
+ protected void restartMiniCluster() throws Exception {
+ restartMiniCluster(getSystemTableNameToWaitFor());
+ }
+
+ protected void restartMiniCluster(TableName tableNameToWaitFor) throws Exception {
+ LOG.info("\n\nManagedKeyTestBase.restartMiniCluster: Flushing caches\n");
+ TEST_UTIL.flush();
+
+ LOG.info("\n\nManagedKeyTestBase.restartMiniCluster: Shutting down cluster\n");
+ TEST_UTIL.shutdownMiniHBaseCluster();
+
+ LOG.info("\n\nManagedKeyTestBase.restartMiniCluster: Sleeping a bit\n");
+ Thread.sleep(2000);
+
+ LOG.info("\n\nManagedKeyTestBase.restartMiniCluster: Starting the cluster back up\n");
+ TEST_UTIL.restartHBaseCluster(1);
+
+ waitForMasterInitialization(tableNameToWaitFor);
+ }
+
+ private void waitForMasterInitialization(TableName tableNameToWaitFor) throws Exception {
+ LOG.info(
+ "\n\nManagedKeyTestBase.waitForMasterInitialization: Waiting for master initialization\n");
TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
- TEST_UTIL.waitUntilAllRegionsAssigned(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+
+ LOG.info(
+ "\n\nManagedKeyTestBase.waitForMasterInitialization: Waiting for regions to be assigned\n");
+ TEST_UTIL.waitUntilAllRegionsAssigned(tableNameToWaitFor);
+ LOG.info("\n\nManagedKeyTestBase.waitForMasterInitialization: Regions assigned\n");
}
@After
public void tearDown() throws Exception {
+ LOG.info("\n\nManagedKeyTestBase.tearDown: Shutting down cluster\n");
TEST_UTIL.shutdownMiniCluster();
+ LOG.info("\n\nManagedKeyTestBase.tearDown: Cluster successfully shut down\n");
+ // Clear the provider cache to avoid test interference
+ Encryption.clearKeyProviderCache();
}
protected Class<? extends ManagedKeyProvider> getKeyProviderClass() {
return MockManagedKeyProvider.class;
}
+
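+ /** Hook for subclasses to opt out of the key management configuration in setUp(). */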
+ protected boolean isWithKeyManagement() {
+ return true;
+ }
+
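+ /** Hook for subclasses to skip the automatic minicluster start in setUp(). */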
+ protected boolean isWithMiniClusterStart() {
+ return true;
+ }
+
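+ /** The system table whose region assignment is awaited after (re)starting the cluster. */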
+ protected TableName getSystemTableNameToWaitFor() {
+ return KeymetaTableAccessor.KEY_META_TABLE_NAME;
+ }
+
+ /**
+ * Useful hook for setting a breakpoint while debugging Ruby tests: it just logs a message,
+ * and you can attach a conditional breakpoint to it.
+ */
+ protected void logMessage(String msg) {
+ LOG.info(msg);
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
index 8ae91de6588f..3f6ddad6a1ee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
@@ -43,7 +43,7 @@ public class TestKeyManagementBase {
public void testGetKeyProviderWithInvalidProvider() throws Exception {
// Setup configuration with a non-ManagedKeyProvider
Configuration conf = new Configuration();
- conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
"org.apache.hadoop.hbase.keymeta.DummyKeyProvider");
MasterServices mockServer = mock(MasterServices.class);
@@ -52,16 +52,21 @@ public void testGetKeyProviderWithInvalidProvider() throws Exception {
final KeyManagementBase keyMgmt = new TestKeyManagement(mockServer);
assertEquals(mockServer, keyMgmt.getKeyManagementService());
- // Should throw RuntimeException when provider is not ManagedKeyProvider
+ // Should throw RuntimeException when provider cannot be cast to ManagedKeyProvider
RuntimeException exception = assertThrows(RuntimeException.class, () -> {
keyMgmt.getKeyProvider();
});
- assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider"));
+ // The error message will be about ClassCastException since DummyKeyProvider doesn't implement
+ // ManagedKeyProvider
+ assertTrue(exception.getMessage().contains("ClassCastException")
+ || exception.getCause() instanceof ClassCastException);
+
exception = assertThrows(RuntimeException.class, () -> {
KeyManagementBase keyMgmt2 = new TestKeyManagement(conf);
keyMgmt2.getKeyProvider();
});
- assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider"));
+ assertTrue(exception.getMessage().contains("ClassCastException")
+ || exception.getCause() instanceof ClassCastException);
assertThrows(IllegalArgumentException.class, () -> {
Configuration configuration = null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java
index 3fe669f90d80..bfd8be319895 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementService.java
@@ -63,8 +63,11 @@ public class TestKeyManagementService {
@Before
public void setUp() throws Exception {
+ // Clear provider cache to avoid interference from other tests
+ Encryption.clearKeyProviderCache();
conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
- conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
conf.set(HConstants.HBASE_ORIGINAL_DIR, "/tmp/hbase");
}
@@ -72,7 +75,8 @@ public void setUp() throws Exception {
public void testDefaultKeyManagementServiceCreation() throws IOException {
// SystemKeyCache needs at least one valid key to be created, so setting up a mock FS that
// returns a mock file that returns a known mocked key metadata.
- MockManagedKeyProvider provider = (MockManagedKeyProvider) Encryption.getKeyProvider(conf);
+ MockManagedKeyProvider provider =
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(conf);
ManagedKeyData keyData =
provider.getManagedKey("system".getBytes(), ManagedKeyData.KEY_SPACE_GLOBAL);
String fileName = SYSTEM_KEY_FILE_PREFIX + "1";
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
index b695dedcdf98..2afa235007c7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
@@ -121,7 +121,8 @@ public void setUp() throws Exception {
closeableMocks = MockitoAnnotations.openMocks(this);
conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
- conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
when(server.getConnection()).thenReturn(connection);
when(connection.getTable(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(table);
@@ -131,7 +132,7 @@ public void setUp() throws Exception {
accessor = new KeymetaTableAccessor(server);
managedKeyProvider = new MockManagedKeyProvider();
- managedKeyProvider.initConfig(conf);
+ managedKeyProvider.initConfig(conf, "");
latestSystemKey = managedKeyProvider.getSystemKey("system-id".getBytes());
when(systemKeyCache.getLatestSystemKey()).thenReturn(latestSystemKey);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
index 807586a9a476..6e2eef1f67a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java
@@ -132,7 +132,7 @@ public void setUp() {
Encryption.clearKeyProviderCache();
conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
- conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, providerClass.getName());
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY, providerClass.getName());
// Configure the server mock to return the configuration
when(server.getConfiguration()).thenReturn(conf);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
index 52659b6cf2a4..63f05e7ee5e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java
@@ -68,7 +68,7 @@ public void testEnableOverRPC() throws Exception {
private void doTestEnable(KeymetaAdmin adminClient) throws IOException, KeyException {
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
MockManagedKeyProvider managedKeyProvider =
- (MockManagedKeyProvider) Encryption.getKeyProvider(master.getConfiguration());
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(master.getConfiguration());
String cust = "cust1";
String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes());
List<ManagedKeyState> managedKeyStates =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
index 6592238add50..a2cb14223e17 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java
@@ -53,6 +53,7 @@
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
@@ -97,7 +98,8 @@ public void setUp() throws Exception {
fs = testRootDir.getFileSystem(conf);
conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
- conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
when(mockServer.getKeyManagementService()).thenReturn(mockServer);
when(mockServer.getFileSystem()).thenReturn(mockFileSystem);
@@ -105,6 +107,12 @@ public void setUp() throws Exception {
keymetaAdmin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor);
}
+ @After
+ public void tearDown() throws Exception {
+ // Clear the provider cache to avoid test interference
+ Encryption.clearKeyProviderCache();
+ }
+
@RunWith(BlockJUnit4ClassRunner.class)
@Category({ MasterTests.class, SmallTests.class })
public static class TestWhenDisabled extends TestKeymetaAdminImpl {
@@ -141,7 +149,7 @@ public static class TestAdminImpl extends TestKeymetaAdminImpl {
@Parameter(2)
public boolean isNullKey;
- @Parameters(name = "{index},keySpace={1},keyState={2}")
+ @Parameters(name = "{index},keySpace={0},keyState={1}")
public static Collection<Object[]> data() {
return Arrays.asList(new Object[][] { { KEY_SPACE_GLOBAL, ACTIVE, false },
{ "ns1", ACTIVE, false }, { KEY_SPACE_GLOBAL, FAILED, true },
@@ -151,7 +159,7 @@ public static Collection data() {
@Test
public void testEnableAndGet() throws Exception {
MockManagedKeyProvider managedKeyProvider =
- (MockManagedKeyProvider) Encryption.getKeyProvider(conf);
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(conf);
managedKeyProvider.setMockedKeyState(CUST, keyState);
when(keymetaAccessor.getActiveKey(CUST.getBytes(), keySpace))
.thenReturn(managedKeyProvider.getManagedKey(CUST.getBytes(), keySpace));
@@ -211,7 +219,7 @@ public static Collection data() {
@Test
public void test() throws Exception {
MockManagedKeyProvider managedKeyProvider =
- (MockManagedKeyProvider) Encryption.getKeyProvider(conf);
+ (MockManagedKeyProvider) Encryption.getManagedKeyProvider(conf);
String cust = "invalidcust1";
String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes());
managedKeyProvider.setMockedKey(cust, null, keySpace);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
index e73c181a74fd..54bfb5e0a120 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java
@@ -26,7 +26,6 @@
import java.security.Key;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.io.crypto.Encryption;
-import org.apache.hadoop.hbase.io.crypto.KeyProvider;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
@@ -51,7 +50,7 @@ public class TestSystemKeyManager extends ManagedKeyTestBase {
@Test
public void testSystemKeyInitializationAndRotation() throws Exception {
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
- KeyProvider keyProvider = Encryption.getKeyProvider(master.getConfiguration());
+ ManagedKeyProvider keyProvider = Encryption.getManagedKeyProvider(master.getConfiguration());
assertNotNull(keyProvider);
assertTrue(keyProvider instanceof ManagedKeyProvider);
assertTrue(keyProvider instanceof MockManagedKeyProvider);
@@ -85,7 +84,7 @@ public void testSystemKeyInitializationAndRotation() throws Exception {
@Test
public void testWithInvalidSystemKey() throws Exception {
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
- KeyProvider keyProvider = Encryption.getKeyProvider(master.getConfiguration());
+ ManagedKeyProvider keyProvider = Encryption.getManagedKeyProvider(master.getConfiguration());
MockManagedKeyProvider pbeKeyProvider = (MockManagedKeyProvider) keyProvider;
// Test startup failure when the cluster key is INACTIVE
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java
index ca2f8088a786..2077673fde8a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecurityUtil.java
@@ -39,7 +39,9 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.CipherProvider;
import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -47,6 +49,7 @@
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.MockAesKeyProvider;
import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer;
+import org.apache.hadoop.hbase.keymeta.KeyNamespaceUtil;
import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.testclassification.SecurityTests;
@@ -92,6 +95,7 @@ public class TestSecurityUtil {
protected HBaseTestingUtil testUtil;
protected Path testPath;
protected ColumnFamilyDescriptor mockFamily;
+ protected TableDescriptor mockTableDescriptor;
protected ManagedKeyDataCache mockManagedKeyDataCache;
protected SystemKeyCache mockSystemKeyCache;
protected FixedFileTrailer mockTrailer;
@@ -99,6 +103,7 @@ public class TestSecurityUtil {
protected Key testKey;
protected byte[] testWrappedKey;
protected Key kekKey;
+ protected String testTableNamespace;
/**
* Configuration builder for setting up different encryption test scenarios.
@@ -116,8 +121,8 @@ public TestConfigBuilder withEncryptionEnabled(boolean enabled) {
return this;
}
- public TestConfigBuilder withKeyManagement(boolean enabled, boolean localKeyGen) {
- this.keyManagementEnabled = enabled;
+ public TestConfigBuilder withKeyManagement(boolean localKeyGen) {
+ this.keyManagementEnabled = true;
this.localKeyGenEnabled = localKeyGen;
return this;
}
@@ -152,7 +157,7 @@ protected void setUpEncryptionConfig() {
// Set up real encryption configuration using default AES cipher
conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, true);
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockAesKeyProvider.class.getName());
- conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
+ conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, HBASE_KEY);
// Enable key caching
conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, "true");
// Use DefaultCipherProvider for real AES encryption functionality
@@ -215,8 +220,8 @@ protected void assertExceptionContains(Class expectedTy
protected void assertEncryptionContextThrowsForWrites(Class<? extends Exception> expectedType,
String expectedMessage) {
Exception exception = assertThrows(Exception.class, () -> {
- SecurityUtil.createEncryptionContext(conf, mockFamily, mockManagedKeyDataCache,
- mockSystemKeyCache, TEST_NAMESPACE);
+ SecurityUtil.createEncryptionContext(conf, mockTableDescriptor, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache);
});
assertTrue("Expected exception type: " + expectedType.getName() + ", but got: "
+ exception.getClass().getName(), expectedType.isInstance(exception));
@@ -244,6 +249,7 @@ public void setUp() throws Exception {
// Setup mocks (only for objects that don't have encryption logic)
mockFamily = mock(ColumnFamilyDescriptor.class);
+ mockTableDescriptor = mock(TableDescriptor.class);
mockManagedKeyDataCache = mock(ManagedKeyDataCache.class);
mockSystemKeyCache = mock(SystemKeyCache.class);
mockTrailer = mock(FixedFileTrailer.class);
@@ -255,8 +261,13 @@ public void setUp() throws Exception {
// Configure mocks
when(mockFamily.getEncryptionType()).thenReturn(AES_CIPHER);
when(mockFamily.getNameAsString()).thenReturn(TEST_FAMILY);
+ // Default to a null CF-level key namespace so the fallback logic is exercised
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(null);
+ when(mockTableDescriptor.getTableName()).thenReturn(TableName.valueOf("test:table"));
when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+ testTableNamespace = KeyNamespaceUtil.constructKeyNamespace(mockTableDescriptor, mockFamily);
+
// Set up default encryption config
setUpEncryptionConfig();
@@ -267,6 +278,12 @@ public void setUp() throws Exception {
testWrappedKey = EncryptionUtil.wrapKey(conf, null, key, kekKey);
}
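+ // Generates a random AES key and wraps it with the configured master key (HBASE_KEY).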
+ private static byte[] createRandomWrappedKey(Configuration conf) throws IOException {
+ Cipher cipher = Encryption.getCipher(conf, "AES");
+ Key key = cipher.getRandomKey();
+ return EncryptionUtil.wrapKey(conf, HBASE_KEY, key);
+ }
+
@RunWith(BlockJUnit4ClassRunner.class)
@Category({ SecurityTests.class, SmallTests.class })
public static class TestBasic extends TestSecurityUtil {
@@ -327,8 +344,8 @@ public static class TestCreateEncryptionContext_ForWrites extends TestSecurityUt
public void testWithNoEncryptionOnFamily() throws IOException {
when(mockFamily.getEncryptionType()).thenReturn(null);
- Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
- mockManagedKeyDataCache, mockSystemKeyCache, "test-namespace");
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
assertEquals(Encryption.Context.NONE, result);
}
@@ -342,20 +359,47 @@ public void testWithEncryptionDisabled() throws IOException {
@Test
public void testWithKeyManagement_LocalKeyGen() throws IOException {
- configBuilder().withKeyManagement(true, true).apply(conf);
- setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData);
+ configBuilder().withKeyManagement(true).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
- Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
- mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
verifyContext(result);
}
@Test
- public void testWithKeyManagement_NoActiveKey() throws IOException {
- configBuilder().withKeyManagement(true, false).apply(conf);
- setupManagedKeyDataCache(TEST_NAMESPACE, ManagedKeyData.KEY_SPACE_GLOBAL, null);
- assertEncryptionContextThrowsForWrites(IOException.class, "No active key found");
+ public void testWithKeyManagement_NoActiveKey_NoSystemKeyCache() throws IOException {
+ // Test backwards compatibility: when no active key found and system cache is null, should
+ // throw
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, ManagedKeyData.KEY_SPACE_GLOBAL, null);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ // With null system key cache, should still throw IOException
+ Exception exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, mockTableDescriptor, mockFamily,
+ mockManagedKeyDataCache, null);
+ });
+ assertTrue("Should reference system key cache",
+ exception.getMessage().contains("SystemKeyCache"));
+ }
+
+ @Test
+ public void testWithKeyManagement_NoActiveKey_WithSystemKeyCache() throws IOException {
+ // Backwards compatibility: when no active key is found but a system key cache is
+ // available, context creation should still succeed.
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, ManagedKeyData.KEY_SPACE_GLOBAL, null);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Should use system key as KEK and generate random DEK
+ assertEquals(mockManagedKeyData, result.getKEKData());
}
@Test
@@ -365,8 +409,8 @@ public void testWithKeyManagement_LocalKeyGen_WithUnknownKeyCipher() throws IOEx
when(unknownKey.getAlgorithm()).thenReturn(UNKNOWN_CIPHER);
when(mockManagedKeyData.getTheKey()).thenReturn(unknownKey);
- configBuilder().withKeyManagement(true, true).apply(conf);
- setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData);
+ configBuilder().withKeyManagement(true).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
assertEncryptionContextThrowsForWrites(RuntimeException.class,
"Cipher 'UNKNOWN_CIPHER' is not");
}
@@ -377,44 +421,45 @@ public void testWithKeyManagement_LocalKeyGen_WithKeyAlgorithmMismatch() throws
when(desKey.getAlgorithm()).thenReturn(DES_CIPHER);
when(mockManagedKeyData.getTheKey()).thenReturn(desKey);
- configBuilder().withKeyManagement(true, true).apply(conf);
- setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData);
+ configBuilder().withKeyManagement(true).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
assertEncryptionContextThrowsForWrites(IllegalStateException.class,
"Encryption for family 'test-family' configured with type 'AES' but key specifies algorithm 'DES'");
}
@Test
public void testWithKeyManagement_UseSystemKeyWithNSSpecificActiveKey() throws IOException {
- configBuilder().withKeyManagement(true, false).apply(conf);
- setupManagedKeyDataCache(TEST_NAMESPACE, mockManagedKeyData);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
setupSystemKeyCache(mockManagedKeyData);
- Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
- mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
verifyContext(result);
}
@Test
public void testWithKeyManagement_UseSystemKeyWithoutNSSpecificActiveKey() throws IOException {
- configBuilder().withKeyManagement(true, false).apply(conf);
- setupManagedKeyDataCache(TEST_NAMESPACE, ManagedKeyData.KEY_SPACE_GLOBAL, mockManagedKeyData);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, ManagedKeyData.KEY_SPACE_GLOBAL,
+ mockManagedKeyData);
setupSystemKeyCache(mockManagedKeyData);
when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
- Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
- mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
verifyContext(result);
}
@Test
public void testWithoutKeyManagement_WithFamilyProvidedKey() throws Exception {
- when(mockFamily.getEncryptionKey()).thenReturn(testWrappedKey);
- configBuilder().withKeyManagement(false, false).apply(conf);
+ byte[] wrappedKey = createRandomWrappedKey(conf);
+ when(mockFamily.getEncryptionKey()).thenReturn(wrappedKey);
- Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
- mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
verifyContext(result, false);
}
@@ -426,28 +471,268 @@ public void testWithoutKeyManagement_KeyAlgorithmMismatch() throws Exception {
byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, HBASE_KEY, differentKey);
when(mockFamily.getEncryptionKey()).thenReturn(wrappedDESKey);
- configBuilder().withKeyManagement(false, false).apply(conf);
assertEncryptionContextThrowsForWrites(IllegalStateException.class,
"Encryption for family 'test-family' configured with type 'AES' but key specifies algorithm 'DES'");
}
@Test
- public void testWithoutKeyManagement_WithRandomKeyGeneration() throws IOException {
+ public void testWithUnavailableCipher() throws IOException {
+ when(mockFamily.getEncryptionType()).thenReturn(UNKNOWN_CIPHER);
+ setUpEncryptionConfigWithNullCipher();
+ assertEncryptionContextThrowsForWrites(IllegalStateException.class,
+ "Cipher 'UNKNOWN_CIPHER' is not available");
+ }
+
+ // ---- New backwards compatibility test scenarios ----
+
+ @Test
+ public void testBackwardsCompatibility_Scenario1_FamilyKeyWithKeyManagement()
+ throws IOException {
+ // Scenario 1: Family has encryption key -> use as DEK, latest STK as KEK
+ when(mockFamily.getEncryptionKey()).thenReturn(testWrappedKey);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that system key is used as KEK
+ assertEquals(mockManagedKeyData, result.getKEKData());
+ }
+
+ @Test
+ public void testBackwardsCompatibility_Scenario2a_ActiveKeyAsDek() throws IOException {
+ // Scenario 2a: Active key exists, local key gen disabled -> use active key as DEK, latest STK
+ // as KEK
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ ManagedKeyData mockSystemKey = mock(ManagedKeyData.class);
+ when(mockSystemKey.getTheKey()).thenReturn(kekKey);
+ setupSystemKeyCache(mockSystemKey);
when(mockFamily.getEncryptionKey()).thenReturn(null);
- configBuilder().withKeyManagement(false, false).apply(conf);
- Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockFamily,
- mockManagedKeyDataCache, mockSystemKeyCache, TEST_NAMESPACE);
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that active key is used as DEK and system key as KEK
+ assertEquals(testKey, result.getKey()); // Active key should be the DEK
+ assertEquals(mockSystemKey, result.getKEKData()); // System key should be the KEK
+ }
+
+ @Test
+ public void testBackwardsCompatibility_Scenario2b_ActiveKeyAsKekWithLocalKeyGen()
+ throws IOException {
+ // Scenario 2b: Active key exists, local key gen enabled -> use active key as KEK, generate
+ // random DEK
+ configBuilder().withKeyManagement(true).apply(conf);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that active key is used as KEK and a generated key as DEK
+ assertNotNull("DEK should be generated", result.getKey());
+ assertEquals(mockManagedKeyData, result.getKEKData()); // Active key should be the KEK
+ }
+
+ @Test
+ public void testBackwardsCompatibility_Scenario3a_NoActiveKeyGenerateLocalKey()
+ throws IOException {
+ // Scenario 3a: No active key -> generate random DEK, latest STK as KEK
+ configBuilder().withKeyManagement(false).apply(conf);
+ // No active key in any namespace
+ setupManagedKeyDataCache(testTableNamespace, ManagedKeyData.KEY_SPACE_GLOBAL, null);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that a random key is generated as DEK and system key as KEK
+ assertNotNull("DEK should be generated", result.getKey());
+ assertEquals(mockManagedKeyData, result.getKEKData()); // System key should be the KEK
+ }
+
+ @Test
+ public void testWithoutKeyManagement_Scenario3b_WithRandomKeyGeneration() throws IOException {
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
verifyContext(result, false);
+ // Without key management a local key is generated, so no key namespace is set.
+ assertNull(result.getKeyNamespace());
}
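+ // ---- Key namespace fallback rules ----
+ // The namespace is resolved by trying, in order: (1) the column family's
+ // ENCRYPTION_KEY_NAMESPACE attribute, (2) the namespace constructed from the table and
+ // family, (3) the plain table name, and (4) the global namespace. The tests below
+ // cover one rule each; testFallbackRuleOrder verifies the precedence.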
@Test
- public void testWithUnavailableCipher() throws IOException {
- when(mockFamily.getEncryptionType()).thenReturn(UNKNOWN_CIPHER);
- setUpEncryptionConfigWithNullCipher();
- assertEncryptionContextThrowsForWrites(IllegalStateException.class,
- "Cipher 'UNKNOWN_CIPHER' is not available");
+ public void testFallbackRule1_CFKeyNamespaceAttribute() throws IOException {
+ // Test Rule 1: Column family has KEY_NAMESPACE attribute
+ String cfKeyNamespace = "cf-specific-namespace";
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(cfKeyNamespace);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ // Mock managed key data cache to return active key only for CF namespace
+ setupManagedKeyDataCache(cfKeyNamespace, mockManagedKeyData);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that CF-specific namespace was used
+ assertEquals(cfKeyNamespace, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testFallbackRule2_ConstructedNamespace() throws IOException {
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(null); // No CF namespace
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ setupManagedKeyDataCache(testTableNamespace, mockManagedKeyData);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupSystemKeyCache(mockManagedKeyData);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that constructed namespace was used
+ assertEquals(testTableNamespace, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testFallbackRule3_TableNameAsNamespace() throws IOException {
+ // Test Rule 3: Use table name as namespace when CF namespace and constructed namespace fail
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(null); // No CF namespace
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ String tableName = "test:table";
+ when(mockTableDescriptor.getTableName()).thenReturn(TableName.valueOf(tableName));
+
+ // Mock cache to fail for CF and constructed namespace, succeed for table name
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(testTableNamespace))).thenReturn(null); // Constructed namespace fails
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(tableName))).thenReturn(mockManagedKeyData); // Table name succeeds
+
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that table name was used as namespace
+ assertEquals(tableName, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testFallbackRule4_GlobalNamespace() throws IOException {
+ // Test Rule 4: Fall back to global namespace when all other rules fail
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(null); // No CF namespace
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ String tableName = "test:table";
+ when(mockTableDescriptor.getTableName()).thenReturn(TableName.valueOf(tableName));
+
+ // Mock cache to fail for all specific namespaces, succeed only for global
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(testTableNamespace))).thenReturn(null); // Constructed namespace fails
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(tableName))).thenReturn(null); // Table name fails
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(ManagedKeyData.KEY_SPACE_GLOBAL))).thenReturn(mockManagedKeyData); // Global succeeds
+
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that global namespace was used
+ assertEquals(ManagedKeyData.KEY_SPACE_GLOBAL, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testFallbackRuleOrder() throws IOException {
+ // Test that the rules are tried in the correct order
+ String cfKeyNamespace = "cf-namespace";
+ String tableName = "test:table";
+
+ when(mockFamily.getEncryptionKeyNamespace()).thenReturn(cfKeyNamespace);
+ when(mockFamily.getEncryptionKey()).thenReturn(null);
+ when(mockTableDescriptor.getTableName()).thenReturn(TableName.valueOf(tableName));
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ // Set up mocks so that CF namespace fails but table name would succeed
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(cfKeyNamespace))).thenReturn(null); // CF namespace fails
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(testTableNamespace))).thenReturn(mockManagedKeyData); // Constructed namespace succeeds
+ when(mockManagedKeyDataCache.getActiveEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
+ eq(tableName))).thenReturn(mockManagedKeyData); // Table name would also succeed
+
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(testKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Verify that constructed namespace was used (Rule 2), not table name (Rule 3)
+ assertEquals(testTableNamespace, result.getKeyNamespace());
+ }
+
+ @Test
+ public void testBackwardsCompatibility_Scenario1_FamilyKeyWithoutKeyManagement()
+ throws IOException {
+ // Scenario 1 variation: Family has encryption key but key management disabled -> use as DEK,
+ // no KEK
+ byte[] wrappedKey = createRandomWrappedKey(conf);
+ when(mockFamily.getEncryptionKey()).thenReturn(wrappedKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, mockTableDescriptor,
+ mockFamily, mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result, false); // No key management, so no KEK data
+ }
+
+ @Test
+ public void testWithKeyManagement_FamilyKey_UnwrapKeyException() throws Exception {
+ // Verify that a KeyException raised while unwrapping a family-provided key with key
+ // management enabled is wrapped in an IOException (the KeyException catch block in
+ // SecurityUtil).
+
+ // Create a properly wrapped key first, then corrupt it to cause unwrapping failure
+ // Exactly 16 bytes (a valid AES key length), but not the KEK used for unwrapping
+ Key wrongKek = new SecretKeySpec("bad-kek-16-bytes".getBytes(), AES_CIPHER);
+ byte[] validWrappedKey = EncryptionUtil.wrapKey(conf, null, testKey, wrongKek);
+
+ when(mockFamily.getEncryptionKey()).thenReturn(validWrappedKey);
+ configBuilder().withKeyManagement(false).apply(conf);
+ setupSystemKeyCache(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey); // Different KEK for unwrapping
+
+ IOException exception = assertThrows(IOException.class, () -> {
+ SecurityUtil.createEncryptionContext(conf, mockTableDescriptor, mockFamily,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+ });
+
+ // The IOException should wrap a KeyException from the unwrapping process
+ assertNotNull("Exception should have a cause", exception.getCause());
+ assertTrue("Exception cause should be a KeyException",
+ exception.getCause() instanceof KeyException);
}
// Tests for the second createEncryptionContext method (for reading files)
@@ -473,9 +758,42 @@ public static class TestCreateEncryptionContext_ForReads extends TestSecurityUti
HBaseClassTestRule.forClass(TestCreateEncryptionContext_ForReads.class);
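+ // KEK resolution order for reads, as exercised by the tests below: (1) system key
+ // lookup by the trailer's KEK checksum, (2) managed key lookup by KEK metadata,
+ // (3) fallback to the latest system key when neither is available.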
@Test
- public void testWithKEKMetadata() throws Exception {
- setupTrailerMocks(testWrappedKey, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, TEST_NAMESPACE);
- setupManagedKeyDataCacheEntry(TEST_NAMESPACE, TEST_KEK_METADATA, testWrappedKey,
+ public void testWithKEKMetadata_STKLookupFirstThenManagedKey() throws Exception {
+ // Test new logic: STK lookup happens first, then metadata lookup if STK fails
+ // Set up scenario where both checksum and metadata are available
+ setupTrailerMocks(testWrappedKey, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, null);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ // STK lookup should succeed and be used (first priority)
+ ManagedKeyData stkKeyData = mock(ManagedKeyData.class);
+ when(stkKeyData.getTheKey()).thenReturn(kekKey);
+ setupSystemKeyCache(TEST_KEK_CHECKSUM, stkKeyData);
+
+ // Also set up managed key cache (but it shouldn't be used since STK succeeds)
+ setupManagedKeyDataCacheEntry(testTableNamespace, TEST_KEK_METADATA, testWrappedKey,
+ mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey())
+ .thenThrow(new RuntimeException("This should not be called"));
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ // Should use STK data, not managed key data
+ assertEquals(stkKeyData, result.getKEKData());
+ }
+
+ @Test
+ public void testWithKEKMetadata_STKFailsThenManagedKeySucceeds() throws Exception {
+ // Test fallback: STK lookup fails, metadata lookup succeeds
+ setupTrailerMocks(testWrappedKey, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, testTableNamespace);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ // STK lookup should fail (returns null)
+ when(mockSystemKeyCache.getSystemKeyByChecksum(TEST_KEK_CHECKSUM)).thenReturn(null);
+
+ // Managed key lookup should succeed
+ setupManagedKeyDataCacheEntry(testTableNamespace, TEST_KEK_METADATA, testWrappedKey,
mockManagedKeyData);
when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
@@ -483,17 +801,27 @@ public void testWithKEKMetadata() throws Exception {
mockManagedKeyDataCache, mockSystemKeyCache);
verifyContext(result);
+ // Should use managed key data since STK failed
+ assertEquals(mockManagedKeyData, result.getKEKData());
}
@Test
- public void testWithKeyManagement_KEKMetadataFailure() throws IOException, KeyException {
+ public void testWithKeyManagement_KEKMetadataAndChecksumFailure()
+ throws IOException, KeyException {
+ // Test scenario where both STK lookup and managed key lookup fail
byte[] keyBytes = "test-encrypted-key".getBytes();
String kekMetadata = "test-kek-metadata";
+ configBuilder().withKeyManagement(false).apply(conf);
when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
when(mockTrailer.getKEKMetadata()).thenReturn(kekMetadata);
+ when(mockTrailer.getKEKChecksum()).thenReturn(TEST_KEK_CHECKSUM);
when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
+ // STK lookup should fail
+ when(mockSystemKeyCache.getSystemKeyByChecksum(TEST_KEK_CHECKSUM)).thenReturn(null);
+
+ // Managed key lookup should also fail
when(mockManagedKeyDataCache.getEntry(eq(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES),
eq("test-namespace"), eq(kekMetadata), eq(keyBytes)))
.thenThrow(new IOException("Key not found"));
@@ -503,13 +831,16 @@ public void testWithKeyManagement_KEKMetadataFailure() throws IOException, KeyEx
mockSystemKeyCache);
});
- assertTrue(exception.getMessage().contains("Failed to get key data"));
+ assertTrue(
+ exception.getMessage().contains("Failed to get key data for KEK metadata: " + kekMetadata));
+ assertTrue(exception.getCause().getMessage().contains("Key not found"));
}
@Test
public void testWithKeyManagement_UseSystemKey() throws IOException {
- setupTrailerMocks(testWrappedKey, null, TEST_KEK_CHECKSUM, TEST_NAMESPACE);
- configBuilder().withKeyManagement(true, false).apply(conf);
+ // Test STK lookup by checksum (first priority in new logic)
+ setupTrailerMocks(testWrappedKey, null, TEST_KEK_CHECKSUM, null);
+ configBuilder().withKeyManagement(false).apply(conf);
setupSystemKeyCache(TEST_KEK_CHECKSUM, mockManagedKeyData);
when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
@@ -517,38 +848,53 @@ public void testWithKeyManagement_UseSystemKey() throws IOException {
mockManagedKeyDataCache, mockSystemKeyCache);
verifyContext(result);
+ assertEquals(mockManagedKeyData, result.getKEKData());
}
@Test
- public void testWithKeyManagement_SystemKeyNotFound() throws IOException {
+ public void testBackwardsCompatibility_WithKeyManagement_LatestSystemKeyNotFound()
+ throws IOException {
+ // Test when the STK lookup by checksum fails and the latest system key is also null
byte[] keyBytes = "test-encrypted-key".getBytes();
- long kekChecksum = 12345L;
when(mockTrailer.getEncryptionKey()).thenReturn(keyBytes);
- when(mockTrailer.getKEKMetadata()).thenReturn(null);
- when(mockTrailer.getKEKChecksum()).thenReturn(kekChecksum);
- when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
// Enable key management
conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
- when(mockSystemKeyCache.getSystemKeyByChecksum(kekChecksum)).thenReturn(null);
+ // Both checksum lookup and latest system key lookup should fail
+ when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(null);
IOException exception = assertThrows(IOException.class, () -> {
SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
mockSystemKeyCache);
});
- assertTrue(exception.getMessage().contains("Failed to get system key"));
+ assertTrue(exception.getMessage().contains("Failed to get latest system key"));
+ }
+
+ @Test
+ public void testBackwardsCompatibility_FallbackToLatestSystemKey() throws IOException {
+ // Test fallback to latest system key when both checksum and metadata are unavailable
+ setupTrailerMocks(testWrappedKey, null, 0L, TEST_NAMESPACE); // No checksum, no metadata
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ ManagedKeyData latestSystemKey = mock(ManagedKeyData.class);
+ when(latestSystemKey.getTheKey()).thenReturn(kekKey);
+ when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(latestSystemKey);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result);
+ assertEquals(latestSystemKey, result.getKEKData());
}
@Test
public void testWithoutKeyManagemntEnabled() throws IOException {
- when(mockTrailer.getEncryptionKey()).thenReturn(testWrappedKey);
+ byte[] wrappedKey = createRandomWrappedKey(conf);
+ when(mockTrailer.getEncryptionKey()).thenReturn(wrappedKey);
when(mockTrailer.getKEKMetadata()).thenReturn(null);
- when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE);
- configBuilder().withKeyManagement(false, false).apply(conf);
- // TODO: Get the key provider to return kek when getKeys() is called.
Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
mockManagedKeyDataCache, mockSystemKeyCache);
@@ -556,13 +902,24 @@ public void testWithoutKeyManagemntEnabled() throws IOException {
verifyContext(result, false);
}
+ @Test
+ public void testKeyManagementBackwardsCompatibility() throws Exception {
+ when(mockTrailer.getEncryptionKey()).thenReturn(testWrappedKey);
+ when(mockSystemKeyCache.getLatestSystemKey()).thenReturn(mockManagedKeyData);
+ when(mockManagedKeyData.getTheKey()).thenReturn(kekKey);
+ configBuilder().withKeyManagement(false).apply(conf);
+
+ Encryption.Context result = SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer,
+ mockManagedKeyDataCache, mockSystemKeyCache);
+
+ verifyContext(result, true);
+ }
+
@Test
public void testWithoutKeyManagement_UnwrapFailure() throws IOException {
byte[] invalidKeyBytes = INVALID_KEY_DATA.getBytes();
when(mockTrailer.getEncryptionKey()).thenReturn(invalidKeyBytes);
when(mockTrailer.getKEKMetadata()).thenReturn(null);
- when(mockTrailer.getKeyNamespace()).thenReturn(TEST_NAMESPACE);
- configBuilder().withKeyManagement(false, false).apply(conf);
Exception exception = assertThrows(Exception.class, () -> {
SecurityUtil.createEncryptionContext(conf, testPath, mockTrailer, mockManagedKeyDataCache,
@@ -579,11 +936,10 @@ public void testCreateEncryptionContext_WithoutKeyManagement_UnavailableCipher()
throws Exception {
// Create a DES key and wrap it first with working configuration
Key desKey = new SecretKeySpec("test-key-16-byte".getBytes(), "DES");
- byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, "hbase", desKey);
+ byte[] wrappedDESKey = EncryptionUtil.wrapKey(conf, HBASE_KEY, desKey);
when(mockTrailer.getEncryptionKey()).thenReturn(wrappedDESKey);
when(mockTrailer.getKEKMetadata()).thenReturn(null);
- when(mockTrailer.getKeyNamespace()).thenReturn("test-namespace");
// Disable key management and use null cipher provider
conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
@@ -634,7 +990,8 @@ public void testCreateEncryptionContext_WithKeyManagement_NullSystemKeyCache()
null);
});
- assertTrue(exception.getMessage().contains("SystemKeyCache is null"));
+ assertTrue(exception.getMessage()
+ .contains("SystemKeyCache can't be null when using key management feature"));
}
}
@@ -654,18 +1011,14 @@ public static Collection data() {
return Arrays.asList(new Object[][] { { true }, { false }, });
}
- @Test
- public void test() throws IOException {
- }
-
@Test
public void testWithDEK() throws IOException, KeyException {
- // This test is challenging because we need to create a scenario where unwrapping fails
- // with either KeyException or IOException. We'll create invalid wrapped data.
- byte[] invalidKeyBytes = INVALID_WRAPPED_KEY_DATA.getBytes();
+ byte[] wrappedKey = createRandomWrappedKey(conf);
+ MockAesKeyProvider keyProvider = (MockAesKeyProvider) Encryption.getKeyProvider(conf);
+ keyProvider.clearKeys(); // Let a new key be instantiated and cause an unwrap failure.
- setupTrailerMocks(invalidKeyBytes, TEST_KEK_METADATA, TEST_KEK_CHECKSUM, TEST_NAMESPACE);
- setupManagedKeyDataCacheEntry(TEST_NAMESPACE, TEST_KEK_METADATA, invalidKeyBytes,
+ setupTrailerMocks(wrappedKey, null, 0L, null);
+ setupManagedKeyDataCacheEntry(TEST_NAMESPACE, TEST_KEK_METADATA, wrappedKey,
mockManagedKeyData);
IOException exception = assertThrows(IOException.class, () -> {
@@ -673,8 +1026,7 @@ public void testWithDEK() throws IOException, KeyException {
mockSystemKeyCache);
});
- assertTrue(exception.getMessage().contains("Failed to unwrap key with KEK checksum: "
- + TEST_KEK_CHECKSUM + ", metadata: " + TEST_KEK_METADATA));
+ assertTrue(exception.getMessage().contains("Key was not successfully unwrapped"));
// The root cause should be some kind of parsing/unwrapping exception
assertNotNull(exception.getCause());
}
@@ -684,8 +1036,8 @@ public void testWithSystemKey() throws IOException {
// Use invalid key bytes to trigger unwrapping failure
byte[] invalidKeyBytes = INVALID_SYSTEM_KEY_DATA.getBytes();
- setupTrailerMocks(invalidKeyBytes, null, TEST_KEK_CHECKSUM, TEST_NAMESPACE);
- configBuilder().withKeyManagement(true, false).apply(conf);
+ setupTrailerMocks(invalidKeyBytes, null, TEST_KEK_CHECKSUM, null);
+ configBuilder().withKeyManagement(false).apply(conf);
setupSystemKeyCache(TEST_KEK_CHECKSUM, mockManagedKeyData);
IOException exception = assertThrows(IOException.class, () -> {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java
index f0cc2febd6e8..7b67b838659b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.util;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
@@ -30,7 +31,9 @@
import org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider;
import org.apache.hadoop.hbase.io.crypto.MockAesKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.ClassRule;
@@ -130,6 +133,71 @@ public void testTestEnabledWhenCryptoIsExplicitlyDisabled() throws Exception {
EncryptionTest.testEncryption(conf, algorithm, null);
}
+ // Utility methods for configuration setup
+ private Configuration createManagedKeyProviderConfig() {
+ Configuration conf = HBaseConfiguration.create();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
+ return conf;
+ }
+
+ @Test
+ public void testManagedKeyProvider() throws Exception {
+ Configuration conf = createManagedKeyProviderConfig();
+ EncryptionTest.testKeyProvider(conf);
+ assertTrue("Managed provider should be cached", EncryptionTest.keyProviderResults
+ .containsKey(conf.get(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY)));
+ }
+
+ @Test(expected = IOException.class)
+ public void testBadManagedKeyProvider() throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ FailingManagedKeyProvider.class.getName());
+ EncryptionTest.testKeyProvider(conf);
+ fail("Instantiation of bad managed key provider should have failed check");
+ }
+
+ @Test
+ public void testEncryptionWithManagedKeyProvider() throws Exception {
+ Configuration conf = createManagedKeyProviderConfig();
+ String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
+ EncryptionTest.testEncryption(conf, algorithm, null);
+ assertTrue("Managed provider should be cached", EncryptionTest.keyProviderResults
+ .containsKey(conf.get(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY)));
+ }
+
+ @Test(expected = IOException.class)
+ public void testUnknownCipherWithManagedKeyProvider() throws Exception {
+ Configuration conf = createManagedKeyProviderConfig();
+ EncryptionTest.testEncryption(conf, "foobar", null);
+ fail("Test for bogus cipher should have failed with managed key provider");
+ }
+
+ @Test(expected = IOException.class)
+ public void testManagedKeyProviderWhenCryptoIsExplicitlyDisabled() throws Exception {
+ Configuration conf = createManagedKeyProviderConfig();
+ String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
+ conf.setBoolean(Encryption.CRYPTO_ENABLED_CONF_KEY, false);
+ EncryptionTest.testEncryption(conf, algorithm, null);
+ assertTrue("Managed provider should be cached", EncryptionTest.keyProviderResults
+ .containsKey(conf.get(HConstants.CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY)));
+ }
+
+ @Test(expected = IOException.class)
+ public void testManagedKeyProviderWithKeyManagementDisabled() throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+ // This should fail since we're trying to use the managed provider without enabling
+ // key management
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, ManagedKeyStoreKeyProvider.class.getName());
+
+ EncryptionTest.testKeyProvider(conf);
+ fail("Should have failed when using managed provider with key management disabled");
+ }
+
public static class FailingKeyProvider implements KeyProvider {
@Override
@@ -181,4 +249,12 @@ public Cipher getCipher(String name) {
}
}
+
+ // Helper class for testing failing managed key provider
+ public static class FailingManagedKeyProvider extends MockManagedKeyProvider {
+ @Override
+ public void initConfig(Configuration conf, String params) {
+ throw new RuntimeException("BAD MANAGED PROVIDER!");
+ }
+ }
}
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 93cc312338c9..2b1d29e7849e 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1223,6 +1223,10 @@ def cfd(arg, tdb)
cfdb.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key,
algorithm))
end
+ if arg.include?(ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY_NAMESPACE)
+ cfdb.setEncryptionKeyNamespace(arg.delete(
+ ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY_NAMESPACE))
+ end
end
if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT)
compression = arg.delete(ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT).upcase.to_sym
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java
index b67fbc69f3c7..8315d05f3feb 100644
--- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaAdminShell.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.client;
-import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
@@ -50,47 +49,64 @@ public class TestKeymetaAdminShell extends ManagedKeyTestBase implements RubyShe
@Before
public void setUp() throws Exception {
final Configuration conf = TEST_UTIL.getConfiguration();
- conf.set("zookeeper.session.timeout", "6000000");
- conf.set("hbase.rpc.timeout", "6000000");
- conf.set("hbase.rpc.read.timeout", "6000000");
- conf.set("hbase.rpc.write.timeout", "6000000");
- conf.set("hbase.client.operation.timeout", "6000000");
- conf.set("hbase.client.scanner.timeout.period", "6000000");
- conf.set("hbase.ipc.client.socket.timeout.connect", "6000000");
- conf.set("hbase.ipc.client.socket.timeout.read", "6000000");
- conf.set("hbase.ipc.client.socket.timeout.write", "6000000");
- conf.set("hbase.master.start.timeout.localHBaseCluster", "6000000");
- conf.set("hbase.master.init.timeout.localHBaseCluster", "6000000");
- conf.set("hbase.client.sync.wait.timeout.msec", "6000000");
- Map cust2key = new HashMap<>();
- Map cust2alias = new HashMap<>();
+ // Enable to be able to debug without timing out.
+ // conf.set("zookeeper.session.timeout", "6000000");
+ // conf.set("hbase.rpc.timeout", "6000000");
+ // conf.set("hbase.rpc.read.timeout", "6000000");
+ // conf.set("hbase.rpc.write.timeout", "6000000");
+ // conf.set("hbase.client.operation.timeout", "6000000");
+ // conf.set("hbase.client.scanner.timeout.period", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.connect", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.read", "6000000");
+ // conf.set("hbase.ipc.client.socket.timeout.write", "6000000");
+ // conf.set("hbase.master.start.timeout.localHBaseCluster", "6000000");
+ // conf.set("hbase.master.init.timeout.localHBaseCluster", "6000000");
+ // conf.set("hbase.client.sync.wait.timeout.msec", "6000000");
+ // conf.set("hbase.client.retries.number", "1000");
+ Map custToKey = new HashMap<>();
+ Map custToAlias = new HashMap<>();
String clusterId = UUID.randomUUID().toString();
String SYSTEM_KEY_ALIAS = "system-key-alias";
String CUST1 = "cust1";
String CUST1_ALIAS = "cust1-alias";
+ String CF_NAMESPACE = "test_table/f";
String GLOB_CUST_ALIAS = "glob-cust-alias";
- String providerParams = KeymetaTestUtils.setupTestKeyStore(TEST_UTIL, true, true, store -> {
- Properties p = new Properties();
- try {
- KeymetaTestUtils.addEntry(conf, 128, store, CUST1_ALIAS, CUST1, true, cust2key, cust2alias,
- p);
- KeymetaTestUtils.addEntry(conf, 128, store, GLOB_CUST_ALIAS, "*", true, cust2key,
- cust2alias, p);
- KeymetaTestUtils.addEntry(conf, 128, store, SYSTEM_KEY_ALIAS, clusterId, true, cust2key,
- cust2alias, p);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- return p;
- });
- // byte[] systemKey = cust2key.get(new Bytes(clusterId.getBytes())).get();
- conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
- conf.set(HConstants.CRYPTO_KEYPROVIDER_PARAMETERS_KEY, providerParams);
+ String CUSTOM_NAMESPACE = "test_namespace";
+ String CUSTOM_NAMESPACE_ALIAS = "custom-namespace-alias";
+ String CUSTOM_GLOBAL_NAMESPACE = "test_global_namespace";
+ String CUSTOM_GLOBAL_NAMESPACE_ALIAS = "custom-global-namespace-alias";
+ if (isWithKeyManagement()) {
+ String providerParams = KeymetaTestUtils.setupTestKeyStore(TEST_UTIL, true, true, store -> {
+ Properties p = new Properties();
+ try {
+ KeymetaTestUtils.addEntry(conf, 128, store, CUST1_ALIAS, CUST1, true, custToKey,
+ custToAlias, p);
+ KeymetaTestUtils.addEntry(conf, 128, store, CUST1_ALIAS, CUST1, true, custToKey,
+ custToAlias, p, CF_NAMESPACE);
+ KeymetaTestUtils.addEntry(conf, 128, store, GLOB_CUST_ALIAS, "*", true, custToKey,
+ custToAlias, p);
+ KeymetaTestUtils.addEntry(conf, 128, store, SYSTEM_KEY_ALIAS, clusterId, true,
+ custToKey, custToAlias, p);
+ KeymetaTestUtils.addEntry(conf, 128, store, CUSTOM_NAMESPACE_ALIAS, CUST1, true,
+ custToKey, custToAlias, p, CUSTOM_NAMESPACE);
+ KeymetaTestUtils.addEntry(conf, 128, store, CUSTOM_GLOBAL_NAMESPACE_ALIAS, "*", true,
+ custToKey, custToAlias, p, CUSTOM_GLOBAL_NAMESPACE);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return p;
+ });
+ conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS);
+ conf.set(HConstants.CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY, providerParams);
+ }
RubyShellTest.setUpConfig(this);
super.setUp();
RubyShellTest.setUpJRubyRuntime(this);
RubyShellTest.doTestSetup(this);
+ addCustodianRubyEnvVars(jruby, "GLOB_CUST", "*");
addCustodianRubyEnvVars(jruby, "CUST1", CUST1);
+ jruby.put("$TEST", this);
}
@Override
@@ -122,6 +138,6 @@ public static void addCustodianRubyEnvVars(ScriptingContainer jruby, String cust
String custodian) {
jruby.put("$" + custId, custodian);
jruby.put("$" + custId + "_ALIAS", custodian + "-alias");
- jruby.put("$" + custId + "_ENCODED", Base64.getEncoder().encodeToString(custodian.getBytes()));
+ jruby.put("$" + custId + "_ENCODED", ManagedKeyProvider.encodeToStr(custodian.getBytes()));
}
}
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMigration.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMigration.java
new file mode 100644
index 000000000000..efe124989e56
--- /dev/null
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMigration.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.junit.ClassRule;
+import org.junit.experimental.categories.Category;
+
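+/**
+ * Runs the keymeta migration Ruby suite. Key management is initially disabled and the
+ * mini cluster is not started up front, so the Ruby test itself drives the migration
+ * from KeyStoreKeyProvider to ManagedKeyStoreKeyProvider.
+ */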
+@Category({ ClientTests.class, IntegrationTests.class })
+public class TestKeymetaMigration extends TestKeymetaAdminShell {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaMigration.class);
+
+ @Override
+ public String getSuitePattern() {
+ return "**/*_keymeta_migration_test.rb";
+ }
+
+ @Override
+ protected boolean isWithKeyManagement() {
+ return false;
+ }
+
+ @Override
+ protected boolean isWithMiniClusterStart() {
+ return false;
+ }
+
+ @Override
+ protected TableName getSystemTableNameToWaitFor() {
+ return TableName.META_TABLE_NAME;
+ }
+}
diff --git a/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
index c1108d0fc7d1..9f3048ab5991 100644
--- a/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
+++ b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb
@@ -35,27 +35,32 @@ def setup
end
define_test 'Test enable key management' do
- cust_and_namespace = "#{$CUST1_ENCODED}:*"
+ test_key_management($CUST1_ENCODED, '*')
+ test_key_management($CUST1_ENCODED, 'test_table/f')
+ test_key_management($CUST1_ENCODED, 'test_namespace')
+ test_key_management($GLOB_CUST_ENCODED, '*')
+
+ puts "Testing that cluster can be restarted when key management is enabled"
+ $TEST.restartMiniCluster()
+ puts "Cluster restarted, testing key management again"
+ setup_hbase
+ test_key_management($GLOB_CUST_ENCODED, '*')
+ puts "Key management test complete"
+ end
+
+ def test_key_management(cust, namespace)
# Repeat the enable twice in a loop and ensure multiple enables succeed and return the
# same output.
2.times do |i|
+ cust_and_namespace = "#{cust}:#{namespace}"
output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
- puts "enable_key_management #{i} output: #{output}"
- assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
+ puts "enable_key_management output: #{output}"
+ assert(output.include?("#{cust} #{namespace} ACTIVE"))
+ output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
+ puts "show_key_status output: #{output}"
+ assert(output.include?("#{cust} #{namespace} ACTIVE"))
+ assert(output.include?('1 row(s)'))
end
- output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
- puts "show_key_status output: #{output}"
- assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
-
- # The ManagedKeyStoreKeyProvider doesn't support specific namespaces, so it will return the
- # global key.
- cust_and_namespace = "#{$CUST1_ENCODED}:test_table/f"
- output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
- puts "enable_key_management output: #{output}"
- assert(output.include?("#{$CUST1_ENCODED} * ACTIVE"))
- output = capture_stdout { @shell.command('show_key_status', cust_and_namespace) }
- puts "show_key_status output: #{output}"
- assert(output.include?('0 row(s)'))
end
end
end
diff --git a/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
index be52a2524e4d..2562a64779e0 100644
--- a/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
+++ b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb
@@ -31,6 +31,7 @@
java_import org.apache.hadoop.hbase.HConstants
java_import org.apache.hadoop.hbase.client.Get
java_import org.apache.hadoop.hbase.io.crypto.Encryption
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider
java_import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider
java_import org.apache.hadoop.hbase.io.hfile.CorruptHFileException
java_import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
@@ -45,14 +46,29 @@ class EncryptedTableKeymetaTest < Test::Unit::TestCase
def setup
setup_hbase
- @test_table = 'enctest'
+ @test_table = "enctest#{Time.now.to_i}"
@connection = $TEST_CLUSTER.connection
end
define_test 'Test table put/get with encryption' do
- cust_and_namespace = "#{$CUST1_ENCODED}:*"
- @shell.command(:enable_key_management, cust_and_namespace)
- @shell.command(:create, @test_table, { 'NAME' => 'f', 'ENCRYPTION' => 'AES' })
+ # Custodian is currently not supported, so this will end up falling back to local key
+ # generation.
+ test_table_put_get_with_encryption($CUST1_ENCODED, '*',
+ { 'NAME' => 'f', 'ENCRYPTION' => 'AES' }, true)
+ end
+
+ define_test 'Test table with custom namespace attribute in Column Family' do
+ custom_namespace = "test_global_namespace"
+ test_table_put_get_with_encryption($GLOB_CUST_ENCODED, custom_namespace,
+ { 'NAME' => 'f', 'ENCRYPTION' => 'AES', 'ENCRYPTION_KEY_NAMESPACE' => custom_namespace },
+ false)
+ end
+
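+ # Enables key management for the given custodian and namespace, creates a table with
+ # table_attrs, and verifies puts/gets and the HFile trailer. When fallback_scenario is
+ # true the key falls back to local generation, so the context key bytes are not
+ # compared against the provider's key.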
+ def test_table_put_get_with_encryption(cust, namespace, table_attrs, fallback_scenario)
+ cust_and_namespace = "#{cust}:#{namespace}"
+ output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) }
+ assert(output.include?("#{cust} #{namespace} ACTIVE"))
+ @shell.command(:create, @test_table, table_attrs)
test_table = table(@test_table)
test_table.put('1', 'f:a', '2')
puts "Added a row, now flushing table #{@test_table}"
@@ -72,6 +88,20 @@ def setup
assert_not_nil(hfile_info)
live_trailer = hfile_info.getTrailer
assert_trailer(live_trailer)
+ assert_equal(namespace, live_trailer.getKeyNamespace())
+
+ # When the active key is supposed to be used, we can validate the key bytes in the
+ # context against the actual key from the provider.
+ unless fallback_scenario
+ encryption_context = hfile_info.getHFileContext().getEncryptionContext()
+ assert_not_nil(encryption_context)
+ assert_not_nil(encryption_context.getKeyBytes())
+ key_provider = Encryption.getManagedKeyProvider($TEST_CLUSTER.getConfiguration)
+ key_data = key_provider.getManagedKey(ManagedKeyProvider.decodeToBytes(cust), namespace)
+ assert_not_nil(key_data)
+ assert_equal(namespace, key_data.getKeyNamespace())
+ assert_equal(key_data.getTheKey().getEncoded(), encryption_context.getKeyBytes())
+ end
## Disable table to ensure that the stores are not cached.
command(:disable, @test_table)
@@ -104,14 +134,15 @@ def setup
# Confirm that the offline reading will fail with no config related to encryption
Encryption.clearKeyProviderCache
conf = Configuration.new($TEST_CLUSTER.getConfiguration)
- conf.set(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.java_class.getName)
+ conf.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.java_class.getName)
# This is expected to fail with CorruptHFileException.
- assert_raises(CorruptHFileException) do |e|
+ e = assert_raises(CorruptHFileException) do
reader = HFile.createReader(fs, store_file_info.getPath, CacheConfig::DISABLED, true, conf)
- assert_true(e.message.include?(
- "Problem reading HFile Trailer from file #{store_file_info.getPath}"
- ))
end
+ assert_true(e.message.include?(
+ "Problem reading HFile Trailer from file #{store_file_info.getPath}"
+ ))
Encryption.clearKeyProviderCache
## Enable back the table to be able to query.
diff --git a/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb b/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb
new file mode 100644
index 000000000000..978ee79e8655
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb
@@ -0,0 +1,641 @@
+# frozen_string_literal: true
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_shell'
+require 'stringio'
+require 'hbase_constants'
+require 'hbase/hbase'
+require 'hbase/table'
+require 'tempfile'
+require 'fileutils'
+
+java_import org.apache.hadoop.conf.Configuration
+java_import org.apache.hadoop.fs.FSDataInputStream
+java_import org.apache.hadoop.hbase.CellUtil
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.TableName
+java_import org.apache.hadoop.hbase.client.Get
+java_import org.apache.hadoop.hbase.client.Scan
+java_import org.apache.hadoop.hbase.io.crypto.Encryption
+java_import org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider
+java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider
+java_import org.apache.hadoop.hbase.io.hfile.FixedFileTrailer
+java_import org.apache.hadoop.hbase.io.hfile.HFile
+java_import org.apache.hadoop.hbase.io.hfile.CacheConfig
+java_import org.apache.hadoop.hbase.util.Bytes
+java_import org.apache.hadoop.hbase.keymeta.KeymetaServiceEndpoint
+java_import org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor
+java_import org.apache.hadoop.hbase.security.EncryptionUtil
+java_import java.security.KeyStore
+java_import java.security.MessageDigest
+java_import javax.crypto.spec.SecretKeySpec
+java_import java.io.FileOutputStream
+java_import java.net.URLEncoder
+java_import java.util.Base64
+
+module Hbase
+ # Test class for key provider migration functionality
+ class KeyProviderKeymetaMigrationTest < Test::Unit::TestCase
+ include TestHelpers
+
+ def setup
+ @test_timestamp = Time.now.to_i.to_s
+ @master_key_alias = 'masterkey'
+ @shared_key_alias = 'sharedkey'
+ @table_key_alias = 'tablelevelkey'
+ @cf_key1_alias = 'cfkey1'
+ @cf_key2_alias = 'cfkey2'
+ @keystore_password = 'password'
+
+ # Test table names
+ @table_no_encryption = "no_enc_#{@test_timestamp}"
+ @table_random_key = "random_key_#{@test_timestamp}"
+ @table_table_key = "table_key_#{@test_timestamp}"
+ @table_shared_key1 = "shared1_#{@test_timestamp}"
+ @table_shared_key2 = "shared2_#{@test_timestamp}"
+ @table_cf_keys = "cf_keys_#{@test_timestamp}"
+
+ # Unified table metadata with CFs and expected namespaces
+ @tables_metadata = {
+ @table_no_encryption => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => nil },
+ no_encryption: true
+ },
+ @table_random_key => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => nil }
+ },
+ @table_table_key => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => @table_table_key }
+ },
+ @table_shared_key1 => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => 'shared-global-key' }
+ },
+ @table_shared_key2 => {
+ cfs: ['f'],
+ expected_namespace: { 'f' => 'shared-global-key' }
+ },
+ @table_cf_keys => {
+ cfs: ['cf1', 'cf2'],
+ expected_namespace: {
+ 'cf1' => "#{@table_cf_keys}/cf1",
+ 'cf2' => "#{@table_cf_keys}/cf2"
+ }
+ }
+ }
+
+ # Setup initial KeyStoreKeyProvider
+ setup_old_key_provider
+ puts " >> Starting Cluster"
+ $TEST.startMiniCluster()
+ puts " >> Cluster started"
+
+ setup_hbase
+ end
+
+ define_test 'Test complete key provider migration' do
+ puts "\n=== Starting Key Provider Migration Test ==="
+
+ # Step 1-3: Setup old provider and create tables
+ create_test_tables
+ puts "\n--- Validating initial table operations ---"
+ validate_pre_migration_operations(false)
+
+ # Step 4: Setup new provider and restart
+ setup_new_key_provider
+ restart_cluster_and_validate
+
+ # Step 5: Perform migration
+ migrate_tables_step_by_step
+
+ # Step 6: Cleanup and final validation
+ cleanup_old_provider_and_validate
+
+ puts "\n=== Migration Test Completed Successfully ==="
+ end
+
+ private
+
+ def setup_old_key_provider
+ puts "\n--- Setting up old KeyStoreKeyProvider ---"
+
+ # Use proper test directory (similar to KeymetaTestUtils.setupTestKeyStore)
+ test_data_dir = $TEST_CLUSTER.getDataTestDir("old_keystore_#{@test_timestamp}").toString
+ FileUtils.mkdir_p(test_data_dir)
+ @old_keystore_file = File.join(test_data_dir, 'keystore.jceks')
+ puts " >> Old keystore file: #{@old_keystore_file}"
+
+ # Create keystore with only the master key
+ # ENCRYPTION_KEY attributes generate their own keys and don't use keystore entries
+ create_keystore(@old_keystore_file, {
+ @master_key_alias => generate_key(@master_key_alias)
+ })
+
+ # Configure old KeyStoreKeyProvider
+ provider_uri = "jceks://#{File.expand_path(@old_keystore_file)}?password=#{@keystore_password}"
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY,
+ KeyStoreKeyProvider.java_class.name)
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_KEYPROVIDER_PARAMETERS_KEY, provider_uri)
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MASTERKEY_NAME_CONF_KEY, @master_key_alias)
+
+ puts " >> Old KeyStoreKeyProvider configured with keystore: #{@old_keystore_file}"
+ end
+
+ def create_test_tables
+ puts "\n--- Creating test tables ---"
+
+ # 1. Table without encryption
+ command(:create, @table_no_encryption, { 'NAME' => 'f' })
+ puts " >> Created table #{@table_no_encryption} without encryption"
+
+ # 2. Table with random key (no explicit key set)
+ command(:create, @table_random_key, { 'NAME' => 'f', 'ENCRYPTION' => 'AES' })
+ puts " >> Created table #{@table_random_key} with random key"
+
+ # 3. Table with table-level key
+ command(:create, @table_table_key, { 'NAME' => 'f', 'ENCRYPTION' => 'AES',
+ 'ENCRYPTION_KEY' => @table_key_alias })
+ puts " >> Created table #{@table_table_key} with table-level key"
+
+ # 4. First table with shared key
+ command(:create, @table_shared_key1, { 'NAME' => 'f', 'ENCRYPTION' => 'AES',
+ 'ENCRYPTION_KEY' => @shared_key_alias })
+ puts " >> Created table #{@table_shared_key1} with shared key"
+
+ # 5. Second table with shared key
+ command(:create, @table_shared_key2, { 'NAME' => 'f', 'ENCRYPTION' => 'AES',
+ 'ENCRYPTION_KEY' => @shared_key_alias })
+ puts " >> Created table #{@table_shared_key2} with shared key"
+
+ # 6. Table with column family specific keys
+ command(:create, @table_cf_keys,
+ { 'NAME' => 'cf1', 'ENCRYPTION' => 'AES', 'ENCRYPTION_KEY' => @cf_key1_alias },
+ { 'NAME' => 'cf2', 'ENCRYPTION' => 'AES', 'ENCRYPTION_KEY' => @cf_key2_alias })
+ puts " >> Created table #{@table_cf_keys} with CF-specific keys"
+ end
+
+ def validate_pre_migration_operations(is_key_management_enabled)
+ @tables_metadata.each do |table_name, metadata|
+ puts " >> test_table_operations on table: #{table_name} with CFs: #{metadata[:cfs].join(', ')}"
+ next if metadata[:no_encryption]
+ test_table_operations(table_name, metadata[:cfs])
+ check_hfile_trailers_pre_migration(table_name, metadata[:cfs], is_key_management_enabled)
+ end
+ end
+
+ def test_table_operations(table_name, column_families)
+ puts " >> Testing operations on table #{table_name}"
+
+ test_table = table(table_name)
+
+ column_families.each do |cf|
+ puts " >> Running put operations on CF: #{cf} in table: #{table_name}"
+ # Put data
+ test_table.put('row1', "#{cf}:col1", 'value1')
+ test_table.put('row2', "#{cf}:col2", 'value2')
+ end
+
+ # Flush table
+ puts " >> Flushing table: #{table_name}"
+ $TEST_CLUSTER.flush(TableName.valueOf(table_name))
+
+ # Get data and validate
+ column_families.each do |cf|
+ puts " >> Validating data in CF: #{cf} in table: #{table_name}"
+ get_result = test_table.table.get(Get.new(Bytes.toBytes('row1')))
+ assert_false(get_result.isEmpty)
+ assert_equal('value1',
+ Bytes.toString(get_result.getValue(Bytes.toBytes(cf), Bytes.toBytes('col1'))))
+ end
+
+ puts " >> Operations validated for #{table_name}"
+ end
+
+ def setup_new_key_provider
+ puts "\n--- Setting up new ManagedKeyStoreKeyProvider ---"
+
+ # Use proper test directory (similar to KeymetaTestUtils.setupTestKeyStore)
+ test_data_dir = $TEST_CLUSTER.getDataTestDir("new_keystore_#{@test_timestamp}").toString
+ FileUtils.mkdir_p(test_data_dir)
+ @new_keystore_file = File.join(test_data_dir, 'managed_keystore.jceks')
+ puts " >> New keystore file: #{@new_keystore_file}"
+
+ # Extract wrapped keys from encrypted tables and unwrap them
+ migrated_keys = extract_and_unwrap_keys_from_tables
+
+ # Create new keystore with migrated keys
+ create_keystore(@new_keystore_file, migrated_keys)
+
+ # Configure ManagedKeyStoreKeyProvider
+ provider_uri = "jceks://#{File.expand_path(@new_keystore_file)}?password=#{@keystore_password}"
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, 'true')
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY,
+ ManagedKeyStoreKeyProvider.java_class.name)
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY, provider_uri)
+ $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY,
+ 'system_key')
+
+ # Setup key configurations for ManagedKeyStoreKeyProvider
+ # Shared key configuration
+ $TEST_CLUSTER.getConfiguration.set(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.shared-global-key.alias",
+ 'shared_global_key')
+ $TEST_CLUSTER.getConfiguration.setBoolean(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.shared-global-key.active", true)
+
+ # Table-level key configuration - let system determine namespace automatically
+ $TEST_CLUSTER.getConfiguration.set(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_table_key}.alias",
+ "#{@table_table_key}_key")
+ $TEST_CLUSTER.getConfiguration.setBoolean(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_table_key}.active", true)
+
+ # CF-level key configurations - let system determine namespace automatically
+ $TEST_CLUSTER.getConfiguration.set(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf1.alias",
+ "#{@table_cf_keys}_cf1_key")
+ $TEST_CLUSTER.getConfiguration.setBoolean(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf1.active", true)
+
+ $TEST_CLUSTER.getConfiguration.set(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf2.alias",
+ "#{@table_cf_keys}_cf2_key")
+ $TEST_CLUSTER.getConfiguration.setBoolean(
+ "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf2.active", true)
+
+ # Enable KeyMeta coprocessor
+ $TEST_CLUSTER.getConfiguration.set('hbase.coprocessor.master.classes',
+ KeymetaServiceEndpoint.java_class.name)
+
+ puts " >> New ManagedKeyStoreKeyProvider configured"
+ end
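+
+ # For reference, the per-key configuration pattern used above follows the
+ # ManagedKeyStoreKeyProvider convention from this patch series:
+ # hbase.crypto.managed_key_store.cust.<encoded-custodian>.<namespace>.alias => keystore alias
+ # hbase.crypto.managed_key_store.cust.<encoded-custodian>.<namespace>.active => whether that key is active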
+
+ def restart_cluster_and_validate
+ puts "\n--- Restarting cluster with managed key store key provider ---"
+
+ $TEST.restartMiniCluster(KeymetaTableAccessor::KEY_META_TABLE_NAME)
+ puts " >> Cluster restarted with ManagedKeyStoreKeyProvider"
+ setup_hbase
+
+ # Validate key management service is functional
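+ # (No keys have been enabled yet, so the status listing should be empty.)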
+ output = capture_stdout { command(:show_key_status, "#{$GLOB_CUST_ENCODED}:*") }
+ assert(output.include?('0 row(s)'), "Expected 0 rows from show_key_status, got: #{output}")
+ puts " >> Key management service is functional"
+
+ # Test operations still work and check HFile trailers
+ puts "\n--- Validating operations after restart ---"
+ validate_pre_migration_operations(true)
+ end
+
+ def check_hfile_trailers_pre_migration(table_name, column_families, is_key_management_enabled)
+ puts " >> Checking HFile trailers for #{table_name} with CFs: #{column_families.join(', ')}"
+
+ column_families.each do |cf_name|
+ validate_hfile_trailer(table_name, cf_name, false, is_key_management_enabled, false)
+ end
+ end
+
+ def migrate_tables_step_by_step
+ puts "\n--- Performing step-by-step table migration ---"
+
+ # Migrate shared key tables first
+ migrate_shared_key_tables
+
+ # Migrate table-level key
+ migrate_table_level_key
+
+ # Migrate CF-level keys
+ migrate_cf_level_keys
+ end
+
+ def migrate_shared_key_tables
+ puts "\n--- Migrating shared key tables ---"
+
+ # Enable key management for shared global key
+ cust_and_namespace = "#{$GLOB_CUST_ENCODED}:shared-global-key"
+ output = capture_stdout { command(:enable_key_management, cust_and_namespace) }
+ assert(output.include?("#{$GLOB_CUST_ENCODED} shared-global-key ACTIVE"),
+ "Expected ACTIVE status for shared key, got: #{output}")
+ puts " >> Enabled key management for shared global key"
+
+ # Migrate first shared key table
+ migrate_table_to_managed_key(@table_shared_key1, 'f', 'shared-global-key', true)
+
+ # Migrate second shared key table
+ migrate_table_to_managed_key(@table_shared_key2, 'f', 'shared-global-key', true)
+ end
+
+ def migrate_table_level_key
+ puts "\n--- Migrating table-level key ---"
+
+ # Enable key management for table namespace
+ cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{@table_table_key}"
+ output = capture_stdout { command(:enable_key_management, cust_and_namespace) }
+ assert(output.include?("#{$GLOB_CUST_ENCODED} #{@table_table_key} ACTIVE"),
+ "Expected ACTIVE status for table key, got: #{output}")
+ puts " >> Enabled key management for table-level key"
+
+ # Migrate the table - no namespace attribute, let system auto-determine
+ migrate_table_to_managed_key(@table_table_key, 'f', @table_table_key, false)
+ end
+
+ def migrate_cf_level_keys
+ puts "\n--- Migrating CF-level keys ---"
+
+ # Enable key management for CF1
+ cf1_namespace = "#{@table_cf_keys}/cf1"
+ cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{cf1_namespace}"
+ output = capture_stdout { command(:enable_key_management, cust_and_namespace) }
+ assert(output.include?("#{$GLOB_CUST_ENCODED} #{cf1_namespace} ACTIVE"),
+ "Expected ACTIVE status for CF1 key, got: #{output}")
+ puts " >> Enabled key management for CF1"
+
+ # Enable key management for CF2
+ cf2_namespace = "#{@table_cf_keys}/cf2"
+ cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{cf2_namespace}"
+ output = capture_stdout { command(:enable_key_management, cust_and_namespace) }
+ assert(output.include?("#{$GLOB_CUST_ENCODED} #{cf2_namespace} ACTIVE"),
+ "Expected ACTIVE status for CF2 key, got: #{output}")
+ puts " >> Enabled key management for CF2"
+
+ # Migrate CF1
+ migrate_table_to_managed_key(@table_cf_keys, 'cf1', cf1_namespace, false)
+
+ # Migrate CF2
+ migrate_table_to_managed_key(@table_cf_keys, 'cf2', cf2_namespace, false)
+ end
+
+ def migrate_table_to_managed_key(table_name, cf_name, namespace, use_namespace_attribute = false)
+ puts " >> Migrating table #{table_name}, CF #{cf_name} to namespace #{namespace}"
+
+ # Use atomic alter operation to remove ENCRYPTION_KEY and optionally add ENCRYPTION_KEY_NAMESPACE
+ if use_namespace_attribute
+ # For shared key tables: remove ENCRYPTION_KEY and add ENCRYPTION_KEY_NAMESPACE atomically
+ command(:alter, table_name,
+ { 'NAME' => cf_name, 'CONFIGURATION' => {'ENCRYPTION_KEY' => '', 'ENCRYPTION_KEY_NAMESPACE' => namespace }})
+ else
+ # For table/CF level keys: just remove ENCRYPTION_KEY, let system auto-determine namespace
+ command(:alter, table_name,
+ { 'NAME' => cf_name, 'CONFIGURATION' => {'ENCRYPTION_KEY' => '' }})
+ end
+
+ puts " >> Altered #{table_name} CF #{cf_name} to use namespace #{namespace}"
+
+ # The new encryption attribute won't be used unless HStore is reinitialized.
+ # To force reinitialization, disable and enable the table.
+ command(:disable, table_name)
+ command(:enable, table_name)
+ # sleep for 5s to ensure region is reopened and store is reinitialized
+ sleep(5)
+
+ # Scan all existing data to verify accessibility
+ scan_and_validate_table(table_name, [cf_name])
+
+ # Add new data
+ test_table = table(table_name)
+ test_table.put('new_row', "#{cf_name}:new_col", 'new_value')
+
+ # Flush and validate trailer
+ $TEST_CLUSTER.flush(TableName.valueOf(table_name))
+ validate_hfile_trailer(table_name, cf_name, true, true, false, namespace)
+
+ puts " >> Migration completed for #{table_name} CF #{cf_name}"
+ end
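+
+ # A minimal sketch of the equivalent interactive shell session for the shared-key
+ # case (table, custodian, and namespace names here are illustrative):
+ #
+ # hbase> enable_key_management 'base64-custodian:shared-global-key'
+ # hbase> alter 'my_table', { NAME => 'f', CONFIGURATION => {
+ # 'ENCRYPTION_KEY' => '', 'ENCRYPTION_KEY_NAMESPACE' => 'shared-global-key' } }
+ # hbase> disable 'my_table'
+ # hbase> enable 'my_table'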
+
+ def scan_and_validate_table(table_name, column_families)
+ puts " >> Scanning and validating existing data in #{table_name}"
+
+ test_table = table(table_name)
+ scan = Scan.new
+ scanner = test_table.table.getScanner(scan)
+
+ row_count = 0
+ while (result = scanner.next)
+ row_count += 1
+ assert_false(result.isEmpty)
+ end
+ scanner.close
+
+ assert(row_count > 0, "Expected to find existing data in #{table_name}")
+ puts " >> Found #{row_count} rows, all accessible"
+ end
+
+ def validate_hfile_trailer(table_name, cf_name, is_post_migration, is_key_management_enabled,
+ is_compacted, expected_namespace = nil)
+ context = is_post_migration ? 'migrated' : 'pre-migration'
+ puts " >> Validating HFile trailer for #{context} table #{table_name}, CF: #{cf_name}"
+
+ table_name_obj = TableName.valueOf(table_name)
+ region_servers = $TEST_CLUSTER.getRSForFirstRegionInTable(table_name_obj)
+ regions = region_servers.getRegions(table_name_obj)
+
+ regions.each do |region|
+ region.getStores.each do |store|
+ next unless store.getColumnFamilyName == cf_name
+ puts " >> store file count for CF: #{cf_name} in table: #{table_name} is #{store.getStorefiles.size}"
+ if is_compacted
+ assert_equal(1, store.getStorefiles.size)
+ else
+ assert_true(store.getStorefiles.size > 0)
+ end
+ store.getStorefiles.each do |storefile|
+ puts " >> Checking HFile trailer for storefile: #{storefile.getPath.getName} with sequence id: #{storefile.getMaxSequenceId} against max sequence id of store: #{store.getMaxSequenceId.getAsLong}"
+ # The flush would have created new HFiles, but the old would still be there
+ # so we need to make sure to check the latest store only.
+ next unless storefile.getMaxSequenceId == store.getMaxSequenceId.getAsLong
+ store_file_info = storefile.getFileInfo
+ next unless store_file_info
+
+ hfile_info = store_file_info.getHFileInfo
+ next unless hfile_info
+
+ trailer = hfile_info.getTrailer
+
+ assert_not_nil(trailer.getEncryptionKey)
+
+ if is_key_management_enabled
+ assert_not_nil(trailer.getKEKMetadata)
+ assert_not_equal(0, trailer.getKEKChecksum)
+ else
+ assert_nil(trailer.getKEKMetadata)
+ assert_equal(0, trailer.getKEKChecksum)
+ end
+
+ if is_post_migration
+ assert_equal(expected_namespace, trailer.getKeyNamespace)
+ puts " >> Trailer validation passed - namespace: #{trailer.getKeyNamespace}"
+ else
+ assert_nil(trailer.getKeyNamespace)
+ puts " >> Trailer validation passed - using legacy key format"
+ end
+ end
+ end
+ end
+ end
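+
+ # Trailer expectations exercised above: an encryption key is always present for
+ # encrypted CFs; KEK metadata and a non-zero KEK checksum appear only once key
+ # management is enabled; a key namespace appears only in post-migration HFiles.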
+
+ def cleanup_old_provider_and_validate
+ puts "\n--- Cleaning up old key provider and final validation ---"
+
+ # Remove old KeyProvider configurations
+ $TEST_CLUSTER.getConfiguration.unset(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY)
+ $TEST_CLUSTER.getConfiguration.unset(HConstants::CRYPTO_KEYPROVIDER_PARAMETERS_KEY)
+ $TEST_CLUSTER.getConfiguration.unset(HConstants::CRYPTO_MASTERKEY_NAME_CONF_KEY)
+
+ # Remove old keystore
+ FileUtils.rm_rf(@old_keystore_file) if File.exist?(@old_keystore_file)
+ puts " >> Removed old keystore and configuration"
+
+ # Restart cluster
+ $TEST.restartMiniCluster(KeymetaTableAccessor::KEY_META_TABLE_NAME)
+ puts " >> Cluster restarted without old key provider"
+ setup_hbase
+
+ # Validate all data is still accessible
+ validate_all_tables_final
+
+ # Perform major compaction and validate
+ perform_major_compaction_and_validate
+ end
+
+ def validate_all_tables_final
+ puts "\n--- Final validation - scanning all tables ---"
+
+ @tables_metadata.each do |table_name, metadata|
+ next if metadata[:no_encryption]
+ puts " >> Final validation for table: #{table_name} with CFs: #{metadata[:cfs].join(', ')}"
+ scan_and_validate_table(table_name, metadata[:cfs])
+ puts " >> #{table_name} - all data accessible"
+ end
+ end
+
+ def perform_major_compaction_and_validate
+ puts "\n--- Performing major compaction and final validation ---"
+
+ $TEST_CLUSTER.compact(true)
+
+ @tables_metadata.each do |table_name, metadata|
+ next if metadata[:no_encryption]
+ puts " >> Validating post-compaction HFiles for table: #{table_name} with CFs: #{metadata[:cfs].join(', ')}"
+ metadata[:cfs].each do |cf_name|
+ validate_hfile_trailer(table_name, cf_name, true, true, true, metadata[:expected_namespace][cf_name])
+ end
+ end
+ end
+
+ # Utility methods
+
+ def extract_and_unwrap_keys_from_tables
+ puts " >> Extracting and unwrapping keys from encrypted tables"
+
+ keys = {}
+
+ # Reuse existing master key from old keystore as system key
+ old_key_provider = Encryption.getKeyProvider($TEST_CLUSTER.getConfiguration)
+ master_key_bytes = old_key_provider.getKey(@master_key_alias).getEncoded
+ keys['system_key'] = master_key_bytes
+
+ # Extract wrapped keys from table descriptors and unwrap them
+ # Only call extract_key_from_table for tables that have ENCRYPTION_KEY attribute
+
+ # For shared key tables (both use same key)
+ shared_key = extract_key_from_table(@table_shared_key1, 'f')
+ keys['shared_global_key'] = shared_key
+
+ # For table-level key
+ table_key = extract_key_from_table(@table_table_key, 'f')
+ keys["#{@table_table_key}_key"] = table_key
+
+ # For CF-level keys
+ cf1_key = extract_key_from_table(@table_cf_keys, 'cf1')
+ keys["#{@table_cf_keys}_cf1_key"] = cf1_key
+
+ cf2_key = extract_key_from_table(@table_cf_keys, 'cf2')
+ keys["#{@table_cf_keys}_cf2_key"] = cf2_key
+
+ puts " >> Extracted #{keys.size} keys for migration"
+ keys
+ end
+
+ def extract_key_from_table(table_name, cf_name)
+ # Get table descriptor
+ admin = $TEST_CLUSTER.getAdmin
+ table_descriptor = admin.getDescriptor(TableName.valueOf(table_name))
+ cf_descriptor = table_descriptor.getColumnFamily(Bytes.toBytes(cf_name))
+
+ # Get the wrapped key bytes from ENCRYPTION_KEY attribute
+ wrapped_key_bytes = cf_descriptor.getEncryptionKey
+
+ # Use EncryptionUtil.unwrapKey with master key alias as subject
+ unwrapped_key = EncryptionUtil.unwrapKey($TEST_CLUSTER.getConfiguration,
+ @master_key_alias, wrapped_key_bytes)
+
+ return unwrapped_key.getEncoded
+ end
+
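+ # Derives a deterministic 256-bit key from the alias via SHA-256 so repeated test
+ # runs recreate identical keystore contents; not a production key-derivation scheme.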
+ def generate_key(alias_name)
+ MessageDigest.getInstance('SHA-256').digest(Bytes.toBytes(alias_name))
+ end
+
+ def create_keystore(keystore_path, key_entries)
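+ # JCEKS rather than JKS: JKS keystores cannot hold SecretKey entries.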
+ store = KeyStore.getInstance('JCEKS')
+ password_chars = @keystore_password.to_java.toCharArray
+ store.load(nil, password_chars)
+
+ key_entries.each do |alias_name, key_bytes|
+ secret_key = SecretKeySpec.new(key_bytes, 'AES')
+ store.setEntry(alias_name, KeyStore::SecretKeyEntry.new(secret_key),
+ KeyStore::PasswordProtection.new(password_chars))
+ end
+
+ fos = FileOutputStream.new(keystore_path)
+ begin
+ store.store(fos, password_chars)
+ ensure
+ fos.close
+ end
+ end
+
+ def teardown
+ # Cleanup temporary test directories (keystore files will be cleaned up with the directories)
+ test_base_dir = $TEST_CLUSTER.getDataTestDir().toString
+ Dir.glob(File.join(test_base_dir, "*keystore_#{@test_timestamp}*")).each do |dir|
+ FileUtils.rm_rf(dir) if File.directory?(dir)
+ end
+ end
+ end
+end
diff --git a/hbase-shell/src/test/ruby/tests_runner.rb b/hbase-shell/src/test/ruby/tests_runner.rb
index 4e31b81535a7..4c93d8d872ba 100644
--- a/hbase-shell/src/test/ruby/tests_runner.rb
+++ b/hbase-shell/src/test/ruby/tests_runner.rb
@@ -40,6 +40,9 @@
end
files = Dir[ File.dirname(__FILE__) + "/" + test_suite_pattern ]
+if files.empty?
+ raise "No tests found for #{test_suite_pattern}"
+end
files.each do |file|
filename = File.basename(file)
if includes != nil && !includes.include?(filename)
From 020adaa0643a7564939a09e1701c62cfe849300a Mon Sep 17 00:00:00 2001
From: Hari Krishna Dara
Date: Mon, 27 Oct 2025 22:56:22 +0530
Subject: [PATCH 6/9] HBASE-29643: Admin API to trigger System Key rotation
(#7394)
---
.rubocop.yml | 9 +
.../org/apache/hadoop/hbase/client/Admin.java | 6 +
.../hbase/client/AdminOverAsyncAdmin.java | 5 +
.../hadoop/hbase/client/AsyncAdmin.java | 6 +
.../hadoop/hbase/client/AsyncHBaseAdmin.java | 5 +
.../hbase/client/RawAsyncHBaseAdmin.java | 25 ++
.../hbase/keymeta/KeymetaAdminClient.java | 49 ++--
.../hadoop/hbase/io/crypto/Encryption.java | 13 +-
.../hadoop/hbase/keymeta/KeymetaAdmin.java | 16 +-
.../io/crypto/MockManagedKeyProvider.java | 17 ++
.../main/protobuf/server/ManagedKeys.proto | 24 +-
.../main/protobuf/server/region/Admin.proto | 3 +
.../apache/hadoop/hbase/HBaseServerBase.java | 13 +-
.../hbase/client/AsyncRegionServerAdmin.java | 5 +
.../hbase/keymeta/KeyManagementBase.java | 8 +-
.../hbase/keymeta/KeymetaAdminImpl.java | 66 ++++--
.../hbase/keymeta/KeymetaServiceEndpoint.java | 123 +++++-----
.../apache/hadoop/hbase/master/HMaster.java | 12 +
.../hbase/master/MasterRpcServices.java | 7 +
.../hadoop/hbase/master/MasterServices.java | 3 +
.../hbase/regionserver/RSRpcServices.java | 24 ++
.../hadoop/hbase/security/SecurityUtil.java | 22 +-
.../hbase/keymeta/TestKeymetaEndpoint.java | 160 +++++--------
.../hbase/keymeta/TestManagedKeymeta.java | 186 ++++++++++++---
.../hbase/master/MockNoopMasterServices.java | 5 +
.../hadoop/hbase/master/MockRegionServer.java | 7 +
.../hbase/master/TestKeymetaAdminImpl.java | 214 ++++++++++++++---
.../hbase/regionserver/TestRSRpcServices.java | 131 +++++++++++
.../hbase/rsgroup/VerifyingRSGroupAdmin.java | 5 +
.../src/main/ruby/hbase/keymeta_admin.rb | 17 +-
hbase-shell/src/main/ruby/shell.rb | 3 +-
.../shell/commands/enable_key_management.rb | 2 +-
.../main/ruby/shell/commands/rotate_stk.rb | 52 +++++
.../client/TestKeymetaMockProviderShell.java | 83 +++++++
.../src/test/ruby/shell/admin_keymeta_test.rb | 21 +-
.../shell/encrypted_table_keymeta_test.rb | 25 +-
.../key_provider_keymeta_migration_test.rb | 219 ++++++++++--------
.../rotate_stk_keymeta_mock_provider_test.rb | 59 +++++
.../hbase/thrift2/client/ThriftAdmin.java | 6 +
39 files changed, 1259 insertions(+), 397 deletions(-)
create mode 100644 hbase-shell/src/main/ruby/shell/commands/rotate_stk.rb
create mode 100644 hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMockProviderShell.java
create mode 100644 hbase-shell/src/test/ruby/shell/rotate_stk_keymeta_mock_provider_test.rb
diff --git a/.rubocop.yml b/.rubocop.yml
index f877a052eea6..e1eb10a9245b 100644
--- a/.rubocop.yml
+++ b/.rubocop.yml
@@ -9,3 +9,12 @@ Layout/LineLength:
Metrics/MethodLength:
Max: 75
+
+Style/GlobalVars:
+  AllowedVariables:
+    - $CUST1_ENCODED
+    - $CUST1_ALIAS
+    - $GLOB_CUST_ENCODED
+    - $TEST
+    - $TEST_CLUSTER
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 1c08ec3b26fd..078ac5997477 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2664,4 +2664,10 @@ List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType, Server
@InterfaceAudience.Private
void restoreBackupSystemTable(String snapshotName) throws IOException;
+
+ /**
+ * Refresh the system key cache on all specified region servers.
+ * @param regionServers the list of region servers to refresh the system key cache on
+ */
+ void refreshSystemKeyCacheOnAllServers(Set<ServerName> regionServers) throws IOException;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
index e6bf6c3d28e0..5ae99b00a7e0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
@@ -1146,4 +1146,9 @@ public List<String> getCachedFilesList(ServerName serverName) throws IOException
public void restoreBackupSystemTable(String snapshotName) throws IOException {
get(admin.restoreBackupSystemTable(snapshotName));
}
+
+ @Override
+ public void refreshSystemKeyCacheOnAllServers(Set<ServerName> regionServers) throws IOException {
+ get(admin.refreshSystemKeyCacheOnAllServers(regionServers));
+ }
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index ec0556f20ac1..a10746f2726b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -1874,4 +1874,10 @@ CompletableFuture<List<LogEntry>> getLogEntries(Set<ServerName> serverNames, Str
@InterfaceAudience.Private
CompletableFuture<Void> restoreBackupSystemTable(String snapshotName);
+
+ /**
+ * Refresh the system key cache on all specified region servers.
+ * @param regionServers the list of region servers to refresh the system key cache on
+ */
+ CompletableFuture<Void> refreshSystemKeyCacheOnAllServers(Set<ServerName> regionServers);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index b1fb2be13547..d135063ec9a2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -686,6 +686,11 @@ public CompletableFuture<Void> updateConfiguration(String groupName) {
return wrap(rawAdmin.updateConfiguration(groupName));
}
+ @Override
+ public CompletableFuture<Void> refreshSystemKeyCacheOnAllServers(Set<ServerName> regionServers) {
+ return wrap(rawAdmin.refreshSystemKeyCacheOnAllServers(regionServers));
+ }
+
@Override
public CompletableFuture<Void> rollWALWriter(ServerName serverName) {
return wrap(rawAdmin.rollWALWriter(serverName));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 710c8c430386..8a5033ff9b18 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -150,6 +150,7 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
@@ -4662,4 +4663,28 @@ MasterProtos.RestoreBackupSystemTableResponse> procedureCall(request,
MasterProtos.RestoreBackupSystemTableResponse::getProcId,
new RestoreBackupSystemTableProcedureBiConsumer());
}
+
+ @Override
+ public CompletableFuture<Void> refreshSystemKeyCacheOnAllServers(Set<ServerName> regionServers) {
+ CompletableFuture<Void> future = new CompletableFuture<>();
+ List<CompletableFuture<Void>> futures =
+ regionServers.stream().map(this::refreshSystemKeyCache).collect(Collectors.toList());
+ addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])),
+ (result, err) -> {
+ if (err != null) {
+ future.completeExceptionally(err);
+ } else {
+ future.complete(result);
+ }
+ });
+ return future;
+ }
+
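+ // Helper for the fan-out above: issues a single refreshSystemKeyCache RPC to one
+ // region server. The aggregate future returned by the public method completes only
+ // after every targeted server has responded, and fails if any individual RPC fails.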
+ private CompletableFuture<Void> refreshSystemKeyCache(ServerName serverName) {
+ return this.<Void> newAdminCaller()
+ .action((controller, stub) -> this.<EmptyMsg, EmptyMsg, Void> adminCall(controller, stub,
+ EmptyMsg.getDefaultInstance(),
+ (s, c, req, done) -> s.refreshSystemKeyCache(controller, req, done), resp -> null))
+ .serverName(serverName).call();
+ }
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
index e72e3c978ada..01a5574443d5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
@@ -25,19 +25,18 @@
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyResponse;
import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg;
@InterfaceAudience.Public
public class KeymetaAdminClient implements KeymetaAdmin {
- private static final Logger LOG = LoggerFactory.getLogger(KeymetaAdminClient.class);
private ManagedKeysProtos.ManagedKeysService.BlockingInterface stub;
public KeymetaAdminClient(Connection conn) throws IOException {
@@ -46,38 +45,54 @@ public KeymetaAdminClient(Connection conn) throws IOException {
}
@Override
- public List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
+ public ManagedKeyData enableKeyManagement(byte[] keyCust, String keyNamespace)
throws IOException {
try {
- ManagedKeysProtos.GetManagedKeysResponse response = stub.enableKeyManagement(null,
- ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build());
- return generateKeyDataList(response);
+ ManagedKeysProtos.ManagedKeyResponse response =
+ stub.enableKeyManagement(null, ManagedKeyRequest.newBuilder()
+ .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build());
+ return generateKeyData(response);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
}
@Override
- public List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
+ public List<ManagedKeyData> getManagedKeys(byte[] keyCust, String keyNamespace)
throws IOException, KeyException {
try {
- ManagedKeysProtos.GetManagedKeysResponse statusResponse = stub.getManagedKeys(null,
- ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build());
+ ManagedKeysProtos.GetManagedKeysResponse statusResponse =
+ stub.getManagedKeys(null, ManagedKeyRequest.newBuilder()
+ .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build());
return generateKeyDataList(statusResponse);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
}
+ @Override
+ public boolean rotateSTK() throws IOException {
+ try {
+ ManagedKeysProtos.RotateSTKResponse response =
+ stub.rotateSTK(null, EmptyMsg.getDefaultInstance());
+ return response.getRotated();
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
private static List<ManagedKeyData>
generateKeyDataList(ManagedKeysProtos.GetManagedKeysResponse stateResponse) {
List<ManagedKeyData> keyStates = new ArrayList<>();
- for (ManagedKeysResponse state : stateResponse.getStateList()) {
- keyStates
- .add(new ManagedKeyData(state.getKeyCustBytes().toByteArray(), state.getKeyNamespace(),
- null, ManagedKeyState.forValue((byte) state.getKeyState().getNumber()),
- state.getKeyMetadata(), state.getRefreshTimestamp()));
+ for (ManagedKeyResponse state : stateResponse.getStateList()) {
+ keyStates.add(generateKeyData(state));
}
return keyStates;
}
+
+ private static ManagedKeyData generateKeyData(ManagedKeysProtos.ManagedKeyResponse response) {
+ return new ManagedKeyData(response.getKeyCust().toByteArray(), response.getKeyNamespace(), null,
+ ManagedKeyState.forValue((byte) response.getKeyState().getNumber()),
+ response.getKeyMetadata(), response.getRefreshTimestamp());
+ }
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index e8d965adebba..56a6ad211731 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -506,12 +506,11 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o
// is configured
String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY);
if (alternateAlgorithm != null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Unable to decrypt data with current cipher algorithm '"
- + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
- + "'. Trying with the alternate cipher algorithm '" + alternateAlgorithm
- + "' configured.");
- }
+ LOG.debug(
+ "Unable to decrypt data with current cipher algorithm '{}'. "
+ + "Trying with the alternate cipher algorithm '{}' configured.",
+ conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES),
+ alternateAlgorithm);
Cipher alterCipher = Encryption.getCipher(conf, alternateAlgorithm);
if (alterCipher == null) {
throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available");
@@ -575,7 +574,7 @@ private static Object createProvider(final Configuration conf, String classNameK
throw new RuntimeException(e);
}
keyProviderCache.put(providerCacheKey, provider);
- LOG.debug("Installed " + providerClassName + " into key provider cache");
+ LOG.debug("Installed {} into key provider cache", providerClassName);
}
return provider;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
index be4f36d88023..4bf79090c3be 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
@@ -31,23 +31,31 @@
public interface KeymetaAdmin {
/**
* Enables key management for the specified custodian and namespace.
- * @param keyCust The key custodian in base64 encoded format.
+ * @param keyCust The key custodian identifier.
* @param keyNamespace The namespace for the key management.
* @return The list of {@link ManagedKeyData} objects each identifying the key and its current
* status.
* @throws IOException if an error occurs while enabling key management.
*/
- List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
+ ManagedKeyData enableKeyManagement(byte[] keyCust, String keyNamespace)
throws IOException, KeyException;
/**
* Get the status of all the keys for the specified custodian.
- * @param keyCust The key custodian in base64 encoded format.
+ * @param keyCust The key custodian identifier.
* @param keyNamespace The namespace for the key management.
* @return The list of {@link ManagedKeyData} objects each identifying the key and its current
* status.
* @throws IOException if an error occurs while enabling key management.
*/
- List<ManagedKeyData>