diff --git a/examples/sdk_examples/action_report/action_report.py b/examples/sdk_examples/action_report/action_report.py new file mode 100644 index 00000000..b52f5ab3 --- /dev/null +++ b/examples/sdk_examples/action_report/action_report.py @@ -0,0 +1,125 @@ +"""Enterprise Action Report SDK Example.""" + +import getpass +import sqlite3 + +from keepersdk.authentication import configuration, endpoint, keeper_auth, login_auth +from keepersdk.constants import KEEPER_PUBLIC_HOSTS +from keepersdk.enterprise import action_report, enterprise_loader, sqlite_enterprise_storage +from keepersdk.errors import KeeperApiError + +TARGET_STATUS = 'no-logon' +DAYS_SINCE = 30 + + +def login(): + config = configuration.JsonConfigurationStorage() + + if not config.get().last_server: + print("Available server options:") + for region, host in KEEPER_PUBLIC_HOSTS.items(): + print(f" {region}: {host}") + server = input('Enter server (default: keepersecurity.com): ').strip() or 'keepersecurity.com' + config.get().last_server = server + else: + server = config.get().last_server + + keeper_endpoint = endpoint.KeeperEndpoint(config, server) + login_auth_context = login_auth.LoginAuth(keeper_endpoint) + username = config.get().last_login or input('Enter username: ') + + login_auth_context.resume_session = True + login_auth_context.login(username) + + while not login_auth_context.login_step.is_final(): + step = login_auth_context.login_step + if isinstance(step, login_auth.LoginStepDeviceApproval): + step.send_push(login_auth.DeviceApprovalChannel.KeeperPush) + print("Device approval request sent. Approve this device and press Enter.") + input() + elif isinstance(step, login_auth.LoginStepPassword): + step.verify_password(getpass.getpass('Enter password: ')) + elif isinstance(step, login_auth.LoginStepTwoFactor): + channel = step.get_channels()[0] + step.send_code(channel.channel_uid, getpass.getpass(f'Enter 2FA code for {channel.channel_name}: ')) + else: + raise NotImplementedError(f"Unsupported login step: {type(step).__name__}") + + if isinstance(login_auth_context.login_step, login_auth.LoginStepConnected): + return login_auth_context.login_step.take_keeper_auth() + return None + + +def print_report(entries, target_status, days_since): + action_text = ( + '\tCOMMAND: NONE (No action specified)\n' + '\tSTATUS: n/a\n' + '\tSERVER MESSAGE: n/a\n' + '\tAFFECTED: 0' + ) + status_display = target_status[0].upper() + target_status[1:] + + print(f'\nAdmin Action Taken:\n{action_text}\n') + print('Note: the following reflects data prior to any administrative action being applied') + print(f'{len(entries)} User(s) With "{status_display}" Status Older Than {days_since} Day(s):\n') + + if not entries: + return + + headers = ['User ID', 'Email', 'Name', 'Status', 'Transfer Status', 'Node'] + col_widths = [14, 31, 22, 8, 17, 19] + + print(' '.join(f'{h:<{w}}' for h, w in zip(headers, col_widths))) + print(' '.join('-' * w for w in col_widths)) + + for entry in entries: + row = [ + str(entry.enterprise_user_id), entry.email, entry.full_name, + entry.status, entry.transfer_status, entry.node_path + ] + print(' '.join(f'{str(v)[:w]:<{w}}' for v, w in zip(row, col_widths))) + + +def generate_action_report(keeper_auth_context: keeper_auth.KeeperAuth): + if not keeper_auth_context.auth_context.is_enterprise_admin: + print("ERROR: This operation requires enterprise admin privileges.") + keeper_auth_context.close() + return + + enterprise = None + try: + conn = sqlite3.Connection('file::memory:', uri=True) + enterprise_id = 
keeper_auth_context.auth_context.enterprise_id or 0 + storage = sqlite_enterprise_storage.SqliteEnterpriseStorage(lambda: conn, enterprise_id) + enterprise = enterprise_loader.EnterpriseLoader(keeper_auth_context, storage) + + config = action_report.ActionReportConfig( + target_user_status=TARGET_STATUS, + days_since=DAYS_SINCE + ) + generator = action_report.ActionReportGenerator( + enterprise.enterprise_data, keeper_auth_context, loader=enterprise, config=config + ) + entries = generator.generate_report() + print_report(entries, TARGET_STATUS, DAYS_SINCE) + + except KeeperApiError as e: + print(f"\nAPI Error: {e}") + except Exception as e: + print(f"\nError: {e}") + finally: + if enterprise: + enterprise.close() + keeper_auth_context.close() + + +def main(): + auth = login() + if auth: + generate_action_report(auth) + else: + print("Login failed.") + + +if __name__ == "__main__": + main() diff --git a/examples/sdk_examples/aging_report/aging_report.py b/examples/sdk_examples/aging_report/aging_report.py new file mode 100644 index 00000000..daadad66 --- /dev/null +++ b/examples/sdk_examples/aging_report/aging_report.py @@ -0,0 +1,123 @@ +"""Example: Password aging report using Keeper SDK.""" + +import datetime +import getpass +import sqlite3 +import sys +import traceback + +from keepersdk.authentication import login_auth, configuration, endpoint, keeper_auth +from keepersdk.enterprise import enterprise_loader, sqlite_enterprise_storage, aging_report +from keepersdk.errors import KeeperApiError + + +TABLE_WIDTH = 140 +COL_WIDTHS = (30, 30, 25, 10, 45) +HEADERS = ['Owner', 'Title', 'Password Changed', 'Shared', 'Record URL'] + + +def login(): + config = configuration.JsonConfigurationStorage() + server = config.get().last_server or 'keepersecurity.com' + + keeper_endpoint = endpoint.KeeperEndpoint(config, server) + auth_context = login_auth.LoginAuth(keeper_endpoint) + auth_context.resume_session = True + + username = config.get().last_login + if not username: + print("Error: No saved login found. Please run with interactive login first.") + return None, None + + auth_context.login(username) + + while not auth_context.login_step.is_final(): + step = auth_context.login_step + if isinstance(step, login_auth.LoginStepDeviceApproval): + step.send_push(login_auth.DeviceApprovalChannel.KeeperPush) + print("Device approval required. 
Approve and press Enter.") + input() + elif isinstance(step, login_auth.LoginStepPassword): + step.verify_password(getpass.getpass('Enter password: ')) + elif isinstance(step, login_auth.LoginStepTwoFactor): + channel = step.get_channels()[0] + code = getpass.getpass(f'Enter 2FA code for {channel.channel_name}: ') + step.send_code(channel.channel_uid, code) + else: + raise NotImplementedError(f"Unsupported login step: {type(step).__name__}") + + if isinstance(auth_context.login_step, login_auth.LoginStepConnected): + return auth_context.login_step.take_keeper_auth(), server + return None, None + + +def format_row(values): + return ' '.join( + f"{str(val or '')[:w-1]:<{w}}" + for val, w in zip(values, COL_WIDTHS + (20,) * (len(values) - len(COL_WIDTHS))) + ) + + +def print_report(rows, title): + print(f"\n{title}") + print('=' * TABLE_WIDTH) + print(format_row(HEADERS)) + print('-' * TABLE_WIDTH) + + for row in rows: + display_row = list(row) + display_row[3] = 'True' if display_row[3] else 'False' + print(format_row(display_row)) + + print('=' * TABLE_WIDTH) + print(f"\nFound {len(rows)} record(s) with aging passwords") + + +def generate_report(auth: keeper_auth.KeeperAuth, server: str): + if not auth.auth_context.is_enterprise_admin: + print("ERROR: This operation requires enterprise admin privileges.") + return 1 + + enterprise = None + try: + conn = sqlite3.Connection('file::memory:', uri=True) + enterprise_id = auth.auth_context.enterprise_id or 0 + storage = sqlite_enterprise_storage.SqliteEnterpriseStorage(lambda: conn, enterprise_id) + enterprise = enterprise_loader.EnterpriseLoader(auth, storage) + + print('\nThe default password aging period is 3 months\n') + print('Loading record password change information...') + + config = aging_report.AgingReportConfig(server=server) + generator = aging_report.AgingReportGenerator(enterprise.enterprise_data, auth, config) + rows = list(generator.generate_report_rows()) + + cutoff_dt = datetime.datetime.now() - datetime.timedelta(days=aging_report.DEFAULT_PERIOD_DAYS) + title = f'Aging Report: Records With Passwords Last Modified Before {cutoff_dt.strftime("%Y/%m/%d %H:%M:%S")}' + + print_report(rows, title) + return 0 + + except KeeperApiError as e: + print(f"API Error: {e}") + return 1 + except Exception as e: + print(f"Error generating aging report: {e}") + traceback.print_exc() + return 1 + finally: + if enterprise: + enterprise.close() + auth.close() + + +def main(): + auth, server = login() + if not auth: + print("Login failed. 
Unable to generate aging report.") + return 1 + return generate_report(auth, server) + + +if __name__ == "__main__": + sys.exit(main() or 0) diff --git a/examples/sdk_examples/share_report/share_report.py b/examples/sdk_examples/share_report/share_report.py new file mode 100644 index 00000000..f664d866 --- /dev/null +++ b/examples/sdk_examples/share_report/share_report.py @@ -0,0 +1,231 @@ +"""Share Report SDK Example - Demonstrates generating share reports for vault records and folders.""" + +import getpass +import sqlite3 +import traceback +from typing import Optional, List, Tuple + +from keepersdk.authentication import login_auth, configuration, endpoint, keeper_auth +from keepersdk.vault import share_report, vault_online +from keepersdk.vault.sqlite_storage import SqliteVaultStorage +from keepersdk.errors import KeeperApiError +from keepersdk.constants import KEEPER_PUBLIC_HOSTS + + +TABLE_WIDTH = 120 +RECORD_COL_WIDTHS = (36, 30, 25, 30) +FOLDER_COL_WIDTHS = (25, 30, 25, 25, 30) +SUMMARY_COL_WIDTHS = (40, 15, 20) + + +def login() -> Optional[keeper_auth.KeeperAuth]: + """Handle the login process including server selection and authentication.""" + config = configuration.JsonConfigurationStorage() + server = _get_server(config) + + keeper_endpoint = endpoint.KeeperEndpoint(config, server) + login_auth_context = login_auth.LoginAuth(keeper_endpoint) + username = config.get().last_login or input('Enter username: ') + + login_auth_context.resume_session = True + login_auth_context.login(username) + + logged_in_with_persistent = _complete_login_steps(login_auth_context) + + if logged_in_with_persistent: + print("Successfully logged in with persistent login") + + if isinstance(login_auth_context.login_step, login_auth.LoginStepConnected): + return login_auth_context.login_step.take_keeper_auth() + return None + + +def _get_server(config: configuration.JsonConfigurationStorage) -> str: + """Get server from config or prompt user.""" + if config.get().last_server: + return config.get().last_server + + print("Available server options:") + for region, host in KEEPER_PUBLIC_HOSTS.items(): + print(f" {region}: {host}") + server = input('Enter server (default: keepersecurity.com): ').strip() or 'keepersecurity.com' + config.get().last_server = server + return server + + +def _complete_login_steps(login_auth_context: login_auth.LoginAuth) -> bool: + """Complete all login steps, returns True if used persistent login.""" + logged_in_with_persistent = True + + while not login_auth_context.login_step.is_final(): + step = login_auth_context.login_step + + if isinstance(step, login_auth.LoginStepDeviceApproval): + step.send_push(login_auth.DeviceApprovalChannel.KeeperPush) + print("Device approval request sent. 
Approve this device and press Enter to continue.") + input() + elif isinstance(step, login_auth.LoginStepPassword): + step.verify_password(getpass.getpass('Enter password: ')) + elif isinstance(step, login_auth.LoginStepTwoFactor): + channel = step.get_channels()[0] + code = getpass.getpass(f'Enter 2FA code for {channel.channel_name}: ') + step.send_code(channel.channel_uid, code) + else: + raise NotImplementedError(f"Unsupported login step: {type(step).__name__}") + + logged_in_with_persistent = False + + return logged_in_with_persistent + + +def format_row(values: List, widths: Tuple) -> str: + """Format a row of values according to column widths.""" + return ' '.join( + f"{str(val if val is not None else '')[:w-1]:<{w}}" + for val, w in zip(values, widths) + ) + + +def print_table(title: str, headers: List[str], rows: List[List], widths: Tuple, empty_msg: str) -> None: + """Print a formatted table with headers and rows.""" + print(f"\n{'=' * TABLE_WIDTH}") + print(title) + print('=' * TABLE_WIDTH) + + if not rows: + print(empty_msg) + return + + print(format_row(headers, widths)) + print('-' * TABLE_WIDTH) + for row in rows: + print(format_row(row, widths)) + print('=' * TABLE_WIDTH) + print(f"\nTotal: {len(rows)}") + + +def print_records_report(entries) -> None: + """Print the share report for records.""" + rows = [ + [e.record_uid, e.record_title or '', e.record_owner or '', e.shared_with_count] + for e in entries + ] + print_table( + "SHARED RECORDS REPORT", + ['Record UID', 'Title', 'Owner', 'Shared With Count'], + rows, RECORD_COL_WIDTHS, + "No shared records found." + ) + + +def print_folders_report(entries) -> None: + """Print the share report for shared folders.""" + rows = [ + [e.folder_uid or '', e.folder_name or '', e.shared_to or '', e.permissions or '', e.folder_path or ''] + for e in entries + ] + print_table( + "SHARED FOLDERS REPORT", + ['Folder UID', 'Folder Name', 'Shared To', 'Permissions', 'Path'], + rows, FOLDER_COL_WIDTHS, + "No shared folders found." + ) + + +def print_summary_report(entries) -> None: + """Print the summary report showing shares by target.""" + rows = [ + [e.shared_to or '', e.record_count or '-', e.shared_folder_count or '-'] + for e in entries + ] + print_table( + "SHARE SUMMARY REPORT", + ['Shared To', 'Records', 'Shared Folders'], + rows, SUMMARY_COL_WIDTHS, + "No shares found." + ) + + +def generate_share_reports(keeper_auth_context: keeper_auth.KeeperAuth) -> None: + """Generate and display share reports for the vault.""" + vault = None + try: + conn = sqlite3.Connection('file::memory:', uri=True) + vault_owner = bytes(keeper_auth_context.auth_context.username, 'utf-8') + vault = vault_online.VaultOnline(keeper_auth_context, SqliteVaultStorage(lambda: conn, vault_owner)) + + print("\nSyncing vault data...") + vault.sync_down() + print(f"Vault synced: {vault.vault_data.record_count} records, " + f"{vault.vault_data.shared_folder_count} shared folders") + + _generate_all_reports(vault) + + except KeeperApiError as e: + print(f"\nAPI Error: {e}") + except Exception as e: + print(f"\nError generating share reports: {e}") + traceback.print_exc() + finally: + if vault: + vault.close() + keeper_auth_context.close() + + +def _create_report_generator( + vault: vault_online.VaultOnline, + *, + show_ownership: bool = False, + folders_only: bool = False +) -> share_report.ShareReportGenerator: + """Create a ShareReportGenerator with the specified configuration. 
+ + Args: + vault: The VaultOnline instance + show_ownership: Include ownership information in the report + folders_only: Generate report for shared folders only + + Returns: + Configured ShareReportGenerator instance + """ + return share_report.ShareReportGenerator( + vault=vault, + config=share_report.ShareReportConfig( + show_ownership=show_ownership, + folders_only=folders_only, + ), + ) + + +def _generate_all_reports(vault: vault_online.VaultOnline) -> None: + """Generate all three report types.""" + print("\nGenerating shared records report...") + records_generator = _create_report_generator(vault, show_ownership=True) + print_records_report(records_generator.generate_records_report()) + + print("\nGenerating shared folders report...") + folders_generator = _create_report_generator(vault, folders_only=True) + print_folders_report(folders_generator.generate_shared_folders_report()) + + print("\nGenerating share summary report...") + summary_generator = _create_report_generator(vault) + print_summary_report(summary_generator.generate_summary_report()) + + +def main() -> None: + """Main entry point for the share report script.""" + print("=" * 60) + print("Keeper Vault Share Report Generator") + print("=" * 60) + print("\nThis tool generates share reports for records and folders in your Keeper vault.\n") + + keeper_auth_context = login() + if keeper_auth_context: + generate_share_reports(keeper_auth_context) + else: + print("Login failed. Unable to generate share reports.") + + +if __name__ == "__main__": + main() + diff --git a/examples/sdk_examples/shared_records_report/shared_records_report.py b/examples/sdk_examples/shared_records_report/shared_records_report.py new file mode 100644 index 00000000..b78951e4 --- /dev/null +++ b/examples/sdk_examples/shared_records_report/shared_records_report.py @@ -0,0 +1,151 @@ +"""Shared Records Report SDK Example - Generates a report of shared records. + +Usage: + python shared_records_report.py +""" + +import getpass +import sqlite3 +import traceback +from typing import Optional, List + +from keepersdk.authentication import login_auth, configuration, endpoint, keeper_auth +from keepersdk.vault import shared_records_report, vault_online +from keepersdk.vault.sqlite_storage import SqliteVaultStorage +from keepersdk.enterprise import enterprise_loader, sqlite_enterprise_storage +from keepersdk.errors import KeeperApiError +from keepersdk.constants import KEEPER_PUBLIC_HOSTS + + +def login() -> Optional[keeper_auth.KeeperAuth]: + """Handle login with persistent session support.""" + config = configuration.JsonConfigurationStorage() + + if not config.get().last_server: + print("Available server options:") + for region, host in KEEPER_PUBLIC_HOSTS.items(): + print(f" {region}: {host}") + server = input('Enter server (default: keepersecurity.com): ').strip() or 'keepersecurity.com' + config.get().last_server = server + + keeper_endpoint = endpoint.KeeperEndpoint(config, config.get().last_server) + login_auth_context = login_auth.LoginAuth(keeper_endpoint) + username = config.get().last_login or input('Enter username: ') + + login_auth_context.resume_session = True + login_auth_context.login(username) + + while not login_auth_context.login_step.is_final(): + step = login_auth_context.login_step + if isinstance(step, login_auth.LoginStepDeviceApproval): + step.send_push(login_auth.DeviceApprovalChannel.KeeperPush) + print("Device approval request sent. 
Approve and press Enter.") + input() + elif isinstance(step, login_auth.LoginStepPassword): + step.verify_password(getpass.getpass('Enter password: ')) + elif isinstance(step, login_auth.LoginStepTwoFactor): + channel = step.get_channels()[0] + step.send_code(channel.channel_uid, getpass.getpass(f'Enter 2FA code: ')) + else: + raise NotImplementedError(f"Unsupported: {type(step).__name__}") + + if isinstance(login_auth_context.login_step, login_auth.LoginStepConnected): + return login_auth_context.login_step.take_keeper_auth() + return None + + +def print_report(entries: List[shared_records_report.SharedRecordReportEntry]) -> None: + """Print the shared records report in table format.""" + if not entries: + print("No shared records found.") + return + + # Headers and column widths + headers = ['#', 'Record UID', 'Title', 'Share Type', 'Shared To', 'Permissions', 'Folder Path'] + widths = [4, 24, 28, 20, 28, 18, 30] + + # Print header + header_row = ' '.join(f"{h:<{w}}" for h, w in zip(headers, widths)) + print(header_row) + print('-' * len(header_row)) + + # Print rows + for i, e in enumerate(entries, 1): + row = [ + str(i), + e.record_uid[:22] + '..' if len(e.record_uid) > 24 else e.record_uid, + e.title[:26] + '..' if len(e.title) > 28 else e.title, + e.share_type, + e.shared_to[:26] + '..' if len(e.shared_to) > 28 else e.shared_to, + e.permissions, + e.folder_path.replace('\n', ' | ')[:28] + '..' if len(e.folder_path) > 30 else e.folder_path.replace('\n', ' | ') + ] + print(' '.join(f"{v:<{w}}" for v, w in zip(row, widths))) + + +def main() -> None: + """Main entry point.""" + vault = None + enterprise = None + keeper_auth_context = None + + try: + keeper_auth_context = login() + if not keeper_auth_context: + print("Login failed.") + return + + # Initialize vault + vault_conn = sqlite3.Connection('file::memory:', uri=True) + vault_owner = bytes(keeper_auth_context.auth_context.username, 'utf-8') + vault = vault_online.VaultOnline( + keeper_auth_context, + SqliteVaultStorage(lambda: vault_conn, vault_owner) + ) + vault.sync_down() + + # Initialize enterprise data if admin (for team expansion) + enterprise_data = None + if keeper_auth_context.auth_context.is_enterprise_admin: + enterprise_conn = sqlite3.Connection('file::memory:', uri=True) + enterprise_id = keeper_auth_context.auth_context.enterprise_id or 0 + enterprise_storage = sqlite_enterprise_storage.SqliteEnterpriseStorage( + lambda: enterprise_conn, enterprise_id + ) + enterprise = enterprise_loader.EnterpriseLoader(keeper_auth_context, enterprise_storage) + enterprise_data = enterprise.enterprise_data + else: + print("Admin access is not present, use an admin account for full team expansion features.") + + # Generate report (default: owned records only) + config = shared_records_report.SharedRecordsReportConfig( + show_team_users=False, + all_records=False + ) + + generator = shared_records_report.SharedRecordsReportGenerator( + vault=vault, + enterprise=enterprise_data, + auth=keeper_auth_context, + config=config + ) + + entries = generator.generate_report() + print_report(entries) + + except KeeperApiError as e: + print(f"API Error: {e}") + except Exception as e: + print(f"Error: {e}") + traceback.print_exc() + finally: + if enterprise: + enterprise.close() + if vault: + vault.close() + if keeper_auth_context: + keeper_auth_context.close() + + +if __name__ == "__main__": + main() diff --git a/keepercli-package/src/keepercli/commands/action_report.py b/keepercli-package/src/keepercli/commands/action_report.py new file mode 
100644 index 00000000..18b0e269 --- /dev/null +++ b/keepercli-package/src/keepercli/commands/action_report.py @@ -0,0 +1,371 @@ +"""Action report command for Keeper CLI.""" + +import argparse +import os +from typing import Any, List, Optional + +from keepersdk.authentication import keeper_auth +from keepersdk.enterprise import action_report, account_transfer, batch_management, enterprise_management, enterprise_types + +from . import base +from .. import api, prompt_utils +from ..helpers import report_utils +from ..params import KeeperParams + + +class ActionReportCommand(base.ArgparseCommand, enterprise_management.IEnterpriseManagementLogger): + def __init__(self): + parser = argparse.ArgumentParser( + prog='action-report', + description='Run an action report based on user activity.', + parents=[base.report_output_parser] + ) + ActionReportCommand.add_arguments_to_parser(parser) + super().__init__(parser) + self.logger = api.get_logger() + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + '--target', '-t', + dest='target_user_status', + action='store', + choices=['no-logon', 'no-update', 'locked', 'invited', 'no-recovery'], + default='no-logon', + help='User status to report on. Default: no-logon' + ) + parser.add_argument( + '--days-since', '-d', + dest='days_since', + type=int, + action='store', + help='Number of days since event of interest (e.g., login, record add/update, lock). ' + 'Default: 30 (or 90 for locked users)' + ) + parser.add_argument( + '--columns', + dest='columns', + action='store', + type=str, + help='Comma-separated list of columns to show on report. ' + 'Supported: name, status, transfer_status, node, team_count, teams, role_count, roles, alias, 2fa_enabled' + ) + parser.add_argument( + '--apply-action', '-a', + dest='apply_action', + action='store', + choices=['lock', 'delete', 'transfer', 'none'], + default='none', + help='Admin action to apply to each user in the report. Default: none' + ) + parser.add_argument( + '--target-user', + dest='target_user', + action='store', + help='Username/email of account to transfer users to when --apply-action=transfer is specified' + ) + parser.add_argument( + '--dry-run', '-n', + dest='dry_run', + action='store_true', + default=False, + help='Enable dry-run mode (preview actions without executing)' + ) + parser.add_argument( + '--force', '-f', + dest='force', + action='store_true', + help='Skip confirmation prompt when applying irreversible admin actions (e.g., delete, transfer)' + ) + parser.add_argument( + '--node', + dest='node', + action='store', + help='Filter users by node (node name or ID)' + ) + + def warning(self, message: str) -> None: + self.logger.warning(message) + + def execute(self, context: KeeperParams, **kwargs) -> Any: + base.require_login(context) + base.require_enterprise_admin(context) + + target_status = kwargs.get('target_user_status', 'no-logon') + days_since = kwargs.get('days_since') + node_name = kwargs.get('node') + apply_action = kwargs.get('apply_action', 'none') + target_user = kwargs.get('target_user') + dry_run = kwargs.get('dry_run', False) + force = kwargs.get('force', False) + output_format = kwargs.get('format', 'table') + output_file = kwargs.get('output') + + if node_name is not None and (not isinstance(node_name, str) or not node_name.strip()): + self.logger.warning('Please provide node name or node ID. 
The --node parameter cannot be empty.') + return + + allowed_actions = action_report.ActionReportGenerator.get_allowed_actions(target_status) + if apply_action not in allowed_actions: + self.logger.warning( + f'Action \'{apply_action}\' not allowed on \'{target_status}\' users: ' + f'value must be one of {allowed_actions}' + ) + return + + if apply_action == 'transfer' and not target_user: + self.logger.warning('--target-user is required when --apply-action=transfer is specified') + return + + if days_since is None: + days_since = 90 if target_status == 'locked' else 30 + + config = action_report.ActionReportConfig( + target_user_status=target_status, + days_since=days_since, + node_name=node_name, + apply_action=apply_action, + target_user=target_user, + dry_run=dry_run, + force=force + ) + + generator = action_report.ActionReportGenerator( + context.enterprise_data, context.auth, config=config + ) + + report_entries = generator.generate_report() + action_result = self._apply_admin_action(context, report_entries, config) + rows, headers = self._generate_output(report_entries, output_format, kwargs.get('columns')) + + status_display = target_status[0].upper() + target_status[1:] if target_status else target_status + title = f'Admin Action Taken:\n{action_result.to_text()}\n' + title += '\nNote: the following reflects data prior to any administrative action being applied' + title += f'\n{len(report_entries)} User(s) With "{status_display}" Status Older Than {days_since} Day(s)' + if node_name: + title += f' in Node "{node_name}"' + title += ': ' + + result = report_utils.dump_report_data( + rows, headers, fmt=output_format, filename=output_file, title=title + ) + + if output_file: + _, ext = os.path.splitext(output_file) + if not ext: + output_file += '.json' if output_format == 'json' else '.csv' + self.logger.info(f'Report saved to: {os.path.abspath(output_file)}') + + if apply_action != 'none' and not dry_run and action_result.affected_count > 0: + context.enterprise_loader.load() + + return result + + def _generate_output( + self, + entries: List[action_report.ActionReportEntry], + output_format: str, + columns_filter: Optional[str] + ) -> tuple: + all_columns = { + 'user_id': lambda e: e.enterprise_user_id, + 'email': lambda e: e.email, + 'name': lambda e: e.full_name, + 'status': lambda e: e.status, + 'transfer_status': lambda e: e.transfer_status, + 'node': lambda e: e.node_path, + 'roles': lambda e: e.roles or [], + 'role_count': lambda e: len(e.roles) if e.roles else 0, + 'teams': lambda e: e.teams or [], + 'team_count': lambda e: len(e.teams) if e.teams else 0, + '2fa_enabled': lambda e: e.tfa_enabled, + } + + if columns_filter: + requested_cols = [c.strip().lower() for c in columns_filter.split(',')] + columns = ['user_id'] + for col in requested_cols: + if col in all_columns and col not in columns: + columns.append(col) + else: + columns = ['user_id', 'email', 'name', 'status', 'transfer_status', 'node'] + + rows = [[all_columns[col](entry) for col in columns] for entry in entries] + headers = columns if output_format == 'json' else [report_utils.field_to_title(h) for h in columns] + return rows, headers + + def _apply_admin_action( + self, + context: KeeperParams, + entries: List[action_report.ActionReportEntry], + config: action_report.ActionReportConfig + ) -> action_report.ActionResult: + action = config.apply_action + + if action == 'none' or not entries: + return action_report.ActionResult(action='NONE (No action specified)', status='n/a', affected_count=0) + + if 
config.dry_run: + return action_report.ActionResult(action=action, status='dry run', affected_count=0) + + if action in ('delete', 'transfer') and not config.force: + emails = [e.email for e in entries] + alert = prompt_utils.get_formatted_text('\nALERT!\n', prompt_utils.COLORS.FAIL) + prompt_utils.output_text( + alert, + f'\nYou are about to {action} the following accounts:\n' + + '\n'.join(f'{idx + 1}) {email}' for idx, email in enumerate(emails)) + + '\n\nThis action cannot be undone.\n' + ) + answer = prompt_utils.user_choice('Do you wish to proceed?', 'yn', 'n') + if answer.lower() != 'y': + return action_report.ActionResult( + action=action, + status='Cancelled by user', + affected_count=0 + ) + + try: + if action == 'lock': + return self._lock_users(context.enterprise_loader, entries) + elif action == 'delete': + return self._delete_users(context.enterprise_loader, entries) + elif action == 'transfer': + return self._transfer_users(context, entries, config.target_user) + return action_report.ActionResult(action=action, status='unknown', affected_count=0) + except Exception as e: + self.logger.warning(f'Action failed: {e}') + return action_report.ActionResult( + action=action, status='fail', affected_count=0, server_message=str(e) + ) + + def _lock_users( + self, loader: enterprise_types.IEnterpriseLoader, entries: List[action_report.ActionReportEntry] + ) -> action_report.ActionResult: + batch = batch_management.BatchManagement(loader=loader, logger=self) + user_ids = [e.enterprise_user_id for e in entries] + batch.user_actions(to_lock=user_ids) + + try: + batch.apply() + return action_report.ActionResult( + action='lock', + status='success', + affected_count=len(entries) + ) + except Exception as e: + return action_report.ActionResult( + action='lock', + status='fail', + affected_count=0, + server_message=str(e) + ) + + def _delete_users( + self, loader: enterprise_types.IEnterpriseLoader, entries: List[action_report.ActionReportEntry] + ) -> action_report.ActionResult: + batch = batch_management.BatchManagement(loader=loader, logger=self) + users_to_delete = [ + enterprise_management.UserEdit(enterprise_user_id=e.enterprise_user_id) + for e in entries + ] + batch.modify_users(to_remove=users_to_delete) + + try: + batch.apply() + return action_report.ActionResult( + action='delete', + status='success', + affected_count=len(entries) + ) + except Exception as e: + return action_report.ActionResult( + action='delete', + status='fail', + affected_count=0, + server_message=str(e) + ) + + def _transfer_users( + self, context: KeeperParams, entries: List[action_report.ActionReportEntry], target_user: Optional[str] + ) -> action_report.ActionResult: + if not target_user: + return action_report.ActionResult( + action='transfer', + status='fail', + affected_count=0, + server_message='No transfer target specified' + ) + + target = target_user.lower().strip() + + target_user_obj = next( + (u for u in context.enterprise_data.users.get_all_entities() + if u.username.lower() == target and u.status == 'active' and u.lock == 0), + None + ) + if not target_user_obj: + return action_report.ActionResult( + action='transfer', status='fail', affected_count=0, + server_message=f'Invalid transfer target: {target}' + ) + + if target in {e.email.lower() for e in entries}: + return action_report.ActionResult( + action='transfer', status='fail', affected_count=0, + server_message='Cannot transfer user to themselves' + ) + + target_keys = self._load_user_public_keys(context.auth, target) + if not target_keys: 
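+            # The target's public keys are needed so transferred data can be encrypted for the new owner; abort the transfer if they cannot be retrieved.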
+ return action_report.ActionResult( + action='transfer', status='fail', affected_count=0, + server_message=f'Failed to get user {target} public key' + ) + + transfer_manager = account_transfer.AccountTransferManager( + loader=context.enterprise_loader, auth=context.auth + ) + + affected = 0 + errors = [] + + for entry in entries: + try: + result = transfer_manager.transfer_account(entry.email, target, target_keys) + if result.success: + affected += 1 + else: + errors.append(f'{entry.email}: {result.error_message}') + except Exception as e: + errors.append(f'{entry.email}: {e}') + + status = 'success' if affected == len(entries) else 'incomplete' if affected > 0 else 'fail' + server_message = '\n'.join(errors) if errors else 'n/a' + + return action_report.ActionResult( + action='transfer_and_delete_user', + status=status, + affected_count=affected, + server_message=server_message + ) + + def _load_user_public_keys(self, auth: keeper_auth.KeeperAuth, username: str) -> Optional[keeper_auth.UserKeys]: + try: + rq = { + 'command': 'public_keys', + 'key_owners': [username] + } + rs = auth.execute_auth_command(rq) + + if 'public_keys' in rs and rs['public_keys']: + pk = rs['public_keys'][0] + return keeper_auth.UserKeys( + rsa=pk.get('public_key'), + ec=pk.get('public_ecc_key'), + aes=None + ) + except Exception as e: + self.logger.debug(f'Failed to load public keys for {username}: {e}') + + return None diff --git a/keepercli-package/src/keepercli/commands/aging_report.py b/keepercli-package/src/keepercli/commands/aging_report.py new file mode 100644 index 00000000..fe26da2f --- /dev/null +++ b/keepercli-package/src/keepercli/commands/aging_report.py @@ -0,0 +1,175 @@ +"""Password aging report command for Keeper CLI.""" + +import argparse +import datetime +import os +from typing import Any, List + +from keepersdk.enterprise import aging_report, enterprise_types +from keepersdk.authentication import keeper_auth +from . import base +from ..helpers import report_utils +from ..params import KeeperParams +from .. 
import api + + +class AgingReportCommand(base.ArgparseCommand): + """Command to generate a password aging report.""" + + def __init__(self): + parser = argparse.ArgumentParser( + prog='aging-report', + description='Run a password aging report', + parents=[base.report_output_parser] + ) + AgingReportCommand.add_arguments_to_parser(parser) + super().__init__(parser) + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser): + parser.add_argument('-r', '--rebuild', dest='rebuild', action='store_true', + help='Rebuild record database') + parser.add_argument('--delete', dest='delete', action='store_true', + help='Delete local database cache') + parser.add_argument('--no-cache', '-nc', dest='no_cache', action='store_true', + help='Remove local storage upon command completion') + parser.add_argument('-s', '--sort', dest='sort_by', action='store', default='last_changed', + choices=['owner', 'title', 'last_changed', 'shared'], + help='Sort output by column') + temporal_group = parser.add_mutually_exclusive_group() + temporal_group.add_argument('--period', dest='period', action='store', + help='Period password has not been modified (e.g., 10d, 3m, 1y)') + temporal_group.add_argument('--cutoff-date', dest='cutoff_date', action='store', + help='Date since password has not been modified (e.g., 2024-01-01)') + parser.add_argument('--username', dest='username', action='store', + help='Report expired passwords for user') + parser.add_argument('--exclude-deleted', dest='exclude_deleted', action='store_true', + help='Exclude deleted records from report') + parser.add_argument('--in-shared-folder', dest='in_shared_folder', action='store_true', + help='Limit report to records in shared folders') + + def execute(self, context: KeeperParams, **kwargs) -> Any: + base.require_login(context) + base.require_enterprise_admin(context) + + logger = api.get_logger() + enterprise_data = context.enterprise_data + auth = context.auth + enterprise_id = self._get_enterprise_id(auth) + + if kwargs.get('delete'): + return self._handle_delete(enterprise_data, auth, enterprise_id, logger) + + period_days, cutoff_date = self._parse_temporal_args(kwargs, logger) + + username = kwargs.get('username') + if username and not self._validate_username(enterprise_data, username, logger): + return + + config = aging_report.AgingReportConfig( + period_days=period_days, + cutoff_date=cutoff_date, + username=username, + exclude_deleted=kwargs.get('exclude_deleted', False), + in_shared_folder=kwargs.get('in_shared_folder', False), + rebuild=kwargs.get('rebuild', False), + no_cache=kwargs.get('no_cache', False), + server=context.keeper_config.server or 'keepersecurity.com' + ) + + if config.rebuild: + logger.info('Rebuilding record database...') + logger.info('Loading record password change information...') + + generator = aging_report.AgingReportGenerator( + context.enterprise_data, context.auth, config, vault=context.vault + ) + + try: + return self._generate_and_output_report( + generator, config, cutoff_date, period_days, kwargs, logger + ) + finally: + if config.no_cache: + generator.cleanup(enterprise_id) + logger.info('Local cache has been removed.') + + def _get_enterprise_id(self, auth: keeper_auth.KeeperAuth) -> int: + """Extract enterprise ID from context.""" + return auth.auth_context.enterprise_id + + def _handle_delete(self, enterprise_data: enterprise_types.IEnterpriseData, auth: keeper_auth.KeeperAuth, enterprise_id: int, logger) -> None: + """Handle --delete option.""" + config = 
aging_report.AgingReportConfig() + generator = aging_report.AgingReportGenerator(enterprise_data, auth, config) + if generator.delete_local_cache(enterprise_id): + logger.info('Local encrypted storage has been deleted.') + else: + logger.info('Local encrypted storage does not exist.') + + def _parse_temporal_args(self, kwargs, logger) -> tuple: + """Parse period/cutoff date arguments.""" + period_days = aging_report.DEFAULT_PERIOD_DAYS + cutoff_date = None + + cutoff_str = kwargs.get('cutoff_date') + period_str = kwargs.get('period') + + if cutoff_str: + cutoff_date = aging_report.parse_date(cutoff_str) + if cutoff_date is None: + raise base.CommandError(f'Invalid date format: {cutoff_str}') + logger.info(f'Reporting passwords not changed since {cutoff_date.strftime("%Y-%m-%d")}') + elif period_str: + parsed_days = aging_report.parse_period(period_str) + if parsed_days is None: + raise base.CommandError(f'Invalid period format: {period_str}. Use format like 10d, 3m, or 1y') + period_days = parsed_days + logger.info(f'Reporting passwords not changed in the last {period_days} days') + else: + logger.info('\n\nThe default password aging period is 3 months\n' + 'To change this value pass --period=[PERIOD] parameter\n' + '[PERIOD] example: 10d for 10 days; 3m for 3 months; 1y for 1 year\n\n') + + return period_days, cutoff_date + + def _validate_username(self, enterprise_data: enterprise_types.IEnterpriseData, username: str, logger) -> bool: + """Validate username exists in enterprise.""" + for user in enterprise_data.users.get_all_entities(): + if user.username.lower() == username.lower(): + return True + logger.info(f'User {username} is not a valid enterprise user') + return False + + def _generate_and_output_report(self, generator, config, cutoff_date, period_days, kwargs, logger): + """Generate report and output in requested format.""" + in_shared_folder = kwargs.get('in_shared_folder', False) + output_format = kwargs.get('format', 'table') + output_file = kwargs.get('output') + sort_by = kwargs.get('sort_by', 'last_changed') + + rows: List[List[Any]] = list(generator.generate_report_rows(include_shared_folder=in_shared_folder)) + headers = aging_report.AgingReportGenerator.get_headers(include_shared_folder=in_shared_folder) + + if output_format != 'json': + headers = [report_utils.field_to_title(h) for h in headers] + + sort_columns = {'owner': 0, 'title': 1, 'last_changed': 2, 'shared': 3} + cutoff_dt = cutoff_date or (datetime.datetime.now() - datetime.timedelta(days=period_days)) + + result = report_utils.dump_report_data( + rows, headers, fmt=output_format, filename=output_file, + title=f'Aging Report: Records With Passwords Last Modified Before {cutoff_dt.strftime("%Y/%m/%d %H:%M:%S")}', + sort_by=sort_columns.get(sort_by, 2), + sort_desc=sort_by in ('last_changed', 'shared') + ) + + logger.info(f'Found {len(rows)} record(s) with aging passwords') + + if output_file: + _, ext = os.path.splitext(output_file) + if not ext: + output_file += '.json' if output_format == 'json' else '.csv' + logger.info(f'Report saved to: {os.path.abspath(output_file)}') + + return result diff --git a/keepercli-package/src/keepercli/commands/audit_report.py b/keepercli-package/src/keepercli/commands/audit_report.py index 9826c613..0cb98337 100644 --- a/keepercli-package/src/keepercli/commands/audit_report.py +++ b/keepercli-package/src/keepercli/commands/audit_report.py @@ -14,6 +14,7 @@ syslog_templates: Optional[Dict[str, str]] = None + def load_syslog_templates(auth: keeper_auth.KeeperAuth) -> None: 
global syslog_templates if syslog_templates is None: @@ -53,6 +54,11 @@ def get_event_message(event: Dict[str, Any]) -> str: class EnterpriseAuditReport(base.ArgparseCommand): def __init__(self): parser = argparse.ArgumentParser(prog='audit-report', parents=[base.report_output_parser], description='Run an audit trail report.') + EnterpriseAuditReport.add_arguments_to_parser(parser) + super().__init__(parser) + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser): parser.add_argument('--syntax-help', dest='syntax_help', action='store_true', help='display help') parser.add_argument('--report-type', dest='report_type', action='store', choices=['raw', 'dim', 'hour', 'day', 'week', 'month', 'span'], @@ -89,17 +95,19 @@ def __init__(self): help='Filter: Geo location') parser.add_argument('--device-type', dest='device_type', action='store', help='Filter: Device type') - super().__init__(parser) - + def execute(self, context: KeeperParams, **kwargs) -> Any: base.require_login(context) base.require_enterprise_admin(context) + auth = context.auth + enterprise_data = context.enterprise_data + report_type = kwargs.get('report_type') if kwargs.get('syntax_help') is True or not report_type: prompt_utils.output_text(audit_report_description) if not report_type: - dim_report = audit_report.DimAuditReport(context.auth) + dim_report = audit_report.DimAuditReport(auth) events = dim_report.execute_dimension_report('audit_event_type') table = [] for event in events: @@ -110,7 +118,7 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: return has_aram = False - keeper_license = next(iter(context.enterprise_data.licenses.get_all_entities()), None) + keeper_license = next(iter(enterprise_data.licenses.get_all_entities()), None) if keeper_license and keeper_license.add_ons: has_aram = any((True for x in keeper_license.add_ons if x.name.lower() == 'enterprise_audit_and_reporting')) @@ -125,7 +133,7 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: raise base.CommandError('"dim" reports expect one "columns" parameter') column = columns[0] - dimension = EnterpriseAuditReport.load_audit_dimension(context.auth, column) + dimension = EnterpriseAuditReport.load_audit_dimension(auth, column) if dimension: table = [] if column == 'audit_event_type': @@ -150,9 +158,9 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: return report_utils.dump_report_data(table, fields, fmt=fmt, filename=kwargs.get('output')) elif report_type == 'raw': - raw_report = audit_report.RawAuditReport(context.auth) + raw_report = audit_report.RawAuditReport(auth) if has_aram: - raw_report.filter = self.get_report_filter(context.auth, **kwargs) + raw_report.filter = self.get_report_filter(auth, **kwargs) limit = kwargs.get('limit') if isinstance(limit, int): raw_report.limit = limit @@ -174,7 +182,7 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: misc_fields: Set[str] = set() if report_format == 'message': fields.append('message') - load_syslog_templates(context.auth) + load_syslog_templates(auth) else: misc_fields.update(MISC_FIELDS) @@ -198,9 +206,9 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: else: if not has_aram: raise base.CommandError('Audit Reporting addon is not enabled') - summary_report = audit_report.SummaryAuditReport(context.auth) + summary_report = audit_report.SummaryAuditReport(auth) summary_report.summary_type = report_type - summary_report.filter = self.get_report_filter(context.auth, **kwargs) + summary_report.filter = 
self.get_report_filter(auth, **kwargs) limit = kwargs.get('limit') if isinstance(limit, int): if not (0 <= limit <= 2000): @@ -265,7 +273,6 @@ def get_field_value(field: str, value: Any, *, report_type: str = 'raw') -> Any: return dt return value - @staticmethod def convert_date_filter(value): if isinstance(value, datetime.datetime): @@ -575,7 +582,6 @@ def load_audit_dimension(auth: keeper_auth.KeeperAuth, dimension) -> Optional[Li Example: "Commander", "Web App, 16.3.4" audit-report --report-type=dim --columns=device_type ''' -in_pattern = re.compile(r"\s*in\s*\(\s*(.*)\s*\)", re.IGNORECASE) between_pattern = re.compile(r"\s*between\s+(\S*)\s+and\s+(.*)", re.IGNORECASE) RAW_FIELDS = ('created', 'audit_event_type', 'username', 'ip_address', 'keeper_version', 'geo_location') diff --git a/keepercli-package/src/keepercli/commands/pam/keeper_pam.py b/keepercli-package/src/keepercli/commands/pam/keeper_pam.py index 92f4b52c..3c6a4162 100644 --- a/keepercli-package/src/keepercli/commands/pam/keeper_pam.py +++ b/keepercli-package/src/keepercli/commands/pam/keeper_pam.py @@ -3,16 +3,21 @@ import requests from datetime import datetime +from .pam_config import PAMConfigListCommand, PAMConfigNewCommand, PAMConfigEditCommand, PAMConfigRemoveCommand from .. import base from ... import api from ...helpers import report_utils, router_utils, gateway_utils from ...params import KeeperParams + from keepersdk import utils +from keepersdk.helpers import router_utils, gateway_utils +from keepersdk.vault import ksm_management logger = api.get_logger() + # Constants DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S' MILLISECONDS_TO_SECONDS = 1000 @@ -67,6 +72,7 @@ class PAMControllerCommand(base.GroupCommand): def __init__(self): super().__init__('PAM Controller') self.register_command(PAMGatewayCommand(), 'gateway', 'g') + self.register_command(PAMConfigCommand(), 'config', 'c') class PAMGatewayCommand(base.GroupCommand): @@ -79,6 +85,18 @@ def __init__(self): self.register_command(PAMGatewaySetMaxInstancesCommand(), 'set-max-instances', 'smi') self.default_verb = 'list' + +class PAMConfigCommand(base.GroupCommand): + + def __init__(self): + super().__init__('PAM Configurations') + self.register_command(PAMConfigListCommand(), 'list', 'l') + self.register_command(PAMConfigNewCommand(), 'new', 'n') + self.register_command(PAMConfigEditCommand(), 'edit', 'e') + self.register_command(PAMConfigRemoveCommand(), 'remove', 'rm') + self.default_verb = 'list' + + class PAMGatewayListCommand(base.ArgparseCommand): def __init__(self): @@ -527,7 +545,8 @@ def execute(self, context: KeeperParams, **kwargs): token_expire_in_min = kwargs.get('token_expire_in_min') self._log_gateway_creation_params(gateway_name, ksm_app, token_expire_in_min) - one_time_token = gateway_utils.create_gateway(vault, gateway_name, ksm_app, token_expire_in_min) + ksm_app_info = ksm_management.get_secrets_manager_app(vault, ksm_app) + one_time_token = gateway_utils.create_gateway(vault, gateway_name, ksm_app_info.uid, token_expire_in_min) if is_return_value: return one_time_token @@ -650,4 +669,3 @@ def _set_max_instances(self, vault, gateway, max_instances): logger.info('%s: max instance count set to %d', gateway.controllerName, max_instances) except Exception as e: raise base.CommandError(f'Error setting max instances: {e}') - diff --git a/keepercli-package/src/keepercli/commands/pam/pam_config.py b/keepercli-package/src/keepercli/commands/pam/pam_config.py new file mode 100644 index 00000000..2afd486b --- /dev/null +++ 
b/keepercli-package/src/keepercli/commands/pam/pam_config.py @@ -0,0 +1,1104 @@ +import argparse +import json +import re + +from .. import base +from ... import api +from ...helpers import report_utils, gateway_utils, folder_utils +from ...params import KeeperParams +from ..record_edit import RecordEditMixin + + +from keepersdk import utils +from keepersdk.proto import pam_pb2, record_pb2 +from keepersdk.helpers import gateway_utils, config_utils +from keepersdk.vault import vault_online, vault_utils, vault_record, record_management +from keepersdk.helpers.pam_config_facade import PamConfigurationRecordFacade +from keepersdk.helpers.tunnel.tunnel_graph import TunnelDAG, tunnel_utils +from .. import record_edit + + +logger = api.get_logger() + + +# PAM Configuration record types +PAM_CONFIG_RECORD_TYPES = ( + 'pamAwsConfiguration', 'pamAzureConfiguration', 'pamGcpConfiguration', + 'pamDomainConfiguration', 'pamNetworkConfiguration', 'pamOciConfiguration' +) + + +class PAMConfigListCommand(base.ArgparseCommand): + + def __init__(self): + parser = argparse.ArgumentParser(prog='pam config list') + PAMConfigListCommand.add_arguments_to_parser(parser) + super().__init__(parser) + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser): + parser.add_argument('--config', '-c', required=False, dest='pam_configuration', action='store', + help='Specific PAM Configuration UID') + parser.add_argument('--verbose', '-v', required=False, dest='verbose', action='store_true', help='Verbose') + parser.add_argument('--format', dest='format', action='store', choices=['table', 'json'], default='table', + help='Output format (table, json)') + + def execute(self, context: KeeperParams, **kwargs): + self._validate_vault_and_permissions(context) + + vault = context.vault + pam_configuration_uid = kwargs.get('pam_configuration') + is_verbose = kwargs.get('verbose') + format_type = kwargs.get('format', 'table') + + if not pam_configuration_uid: + result = self._list_all_configurations(vault, is_verbose, format_type) + if format_type == 'json' and result: + return result + else: + result = self._list_single_configuration(vault, pam_configuration_uid, is_verbose, format_type) + if format_type == 'json' and result: + return result + + if format_type == 'table': + self._print_tunneling_config(vault, pam_configuration_uid) + + def _validate_vault_and_permissions(self, context: KeeperParams): + """Validates that vault is initialized and user has enterprise admin permissions.""" + if not context.vault: + raise ValueError("Vault is not initialized, login to initialize the vault.") + base.require_enterprise_admin(context) + + def _print_tunneling_config(self, vault: vault_online.VaultOnline, config_uid: str): + """Prints tunneling configuration for a specific PAM configuration.""" + encrypted_session_token, encrypted_transmission_key, _ = gateway_utils.get_keeper_tokens(vault) + tmp_dag = TunnelDAG(vault, encrypted_session_token, encrypted_transmission_key, config_uid, is_config=True) + tmp_dag.print_tunneling_config(config_uid, None) + + def _list_single_configuration(self, vault: vault_online.VaultOnline, config_uid: str, + is_verbose: bool, format_type: str): + """Lists details for a single PAM configuration.""" + configuration = self._load_and_validate_configuration(vault, config_uid, format_type) + facade = self._create_facade(configuration) + shared_folder = self._load_shared_folder(vault, facade.folder_uid) + + if format_type == 'json': + return self._format_single_config_json(configuration, 
facade, shared_folder) + else: + self._format_single_config_table(configuration, facade, shared_folder) + + def _list_all_configurations(self, vault: vault_online.VaultOnline, is_verbose: bool, format_type: str): + """Lists all PAM configurations.""" + configs_data = [] + table = [] + headers = self._build_list_headers(is_verbose, format_type) + + for config_record in self._find_pam_configurations(vault): + facade = self._create_facade(config_record) + shared_folder_parents = vault_utils.get_folders_for_record(vault.vault_data, config_record.record_uid) + + if not shared_folder_parents: + logger.warning(f'Following configuration is not in the shared folder: UID: %s, Title: %s', + config_record.record_uid, config_record.title) + continue + + shared_folder = shared_folder_parents[0] + full_record = vault.vault_data.load_record(config_record.record_uid) + + if format_type == 'json': + config_data = self._build_config_json_data(config_record, facade, shared_folder, full_record, is_verbose) + configs_data.append(config_data) + else: + row = self._build_config_table_row(config_record, facade, shared_folder, full_record, is_verbose) + table.append(row) + + return self._format_output(configs_data, table, headers, format_type) + + def _load_and_validate_configuration(self, vault: vault_online.VaultOnline, config_uid: str, format_type: str): + """Loads and validates a PAM configuration record.""" + configuration = vault.vault_data.load_record(config_uid) + if not configuration: + self._handle_error(format_type, f'Configuration {config_uid} not found') + + if configuration.version != 6 or not isinstance(configuration, vault_record.TypedRecord): + self._handle_error(format_type, f'{config_uid} is not PAM Configuration') + + return configuration + + def _handle_error(self, format_type: str, error_message: str): + """Handles errors based on output format.""" + if format_type == 'json': + return json.dumps({"error": error_message}) + else: + raise Exception(error_message) + + def _create_facade(self, configuration): + """Creates a PAM configuration facade for the given record.""" + facade = PamConfigurationRecordFacade() + facade.record = configuration + return facade + + def _load_shared_folder(self, vault: vault_online.VaultOnline, folder_uid: str): + """Loads shared folder if it exists.""" + if folder_uid and folder_uid in vault.vault_data._shared_folders: + return vault.vault_data.load_shared_folder(folder_uid) + return None + + def _find_pam_configurations(self, vault: vault_online.VaultOnline): + """Finds all PAM configuration records.""" + for record in vault.vault_data.find_records(criteria='', record_type=None, record_version=6): + if record.record_type in PAM_CONFIG_RECORD_TYPES: + yield record + else: + logger.warning(f'Following configuration has unsupported type: UID: %s, Title: %s', + record.record_uid, record.title) + + def _build_list_headers(self, is_verbose: bool, format_type: str): + """Builds headers for the configuration list output.""" + if format_type == 'json': + headers = ['uid', 'config_name', 'config_type', 'shared_folder', 'gateway_uid', 'resource_record_uids'] + if is_verbose: + headers.append('fields') + else: + headers = ['UID', 'Config Name', 'Config Type', 'Shared Folder', 'Gateway UID', 'Resource Record UIDs'] + if is_verbose: + headers.append('Fields') + return headers + + def _extract_config_fields(self, record, is_verbose: bool): + """Extracts field data from a configuration record.""" + fields_data = {} if is_verbose else [] + + for field in record.fields: + if 
field.type in ('pamResources', 'fileRef'): + continue + + values = list(field.get_external_value()) + if not values: + continue + + field_name = field.external_name() + if field.type == 'schedule': + field_name = 'Default Schedule' + + value_str = ', '.join(field.get_external_value()) + if is_verbose: + fields_data[field_name] = value_str + else: + fields_data.append(f'{field_name}: {value_str}') + + return fields_data + + def _build_config_json_data(self, config_record, facade, shared_folder, full_record, is_verbose: bool): + """Builds JSON data structure for a configuration.""" + config_data = { + "uid": config_record.record_uid, + "config_name": config_record.title, + "config_type": config_record.record_type, + "shared_folder": { + "name": shared_folder.name, + "uid": shared_folder.folder_uid + }, + "gateway_uid": facade.controller_uid, + "resource_record_uids": facade.resource_ref + } + + if is_verbose: + config_data["fields"] = self._extract_config_fields(full_record, is_verbose=True) + + return config_data + + def _build_config_table_row(self, config_record, facade, shared_folder, full_record, is_verbose: bool): + """Builds a table row for a configuration.""" + row = [ + config_record.record_uid, + config_record.title, + config_record.record_type, + f'{shared_folder.name} ({shared_folder.folder_uid})', + facade.controller_uid, + facade.resource_ref + ] + + if is_verbose: + fields = self._extract_config_fields(full_record, is_verbose=False) + row.append(fields) + + return row + + def _format_output(self, configs_data, table, headers, format_type: str): + """Formats and outputs the final result.""" + if format_type == 'json': + configs_data.sort(key=lambda x: x['config_name'] or '') + return json.dumps({"configurations": configs_data}, indent=2) + else: + table.sort(key=lambda x: (x[1] or '')) + report_utils.dump_report_data(table, headers, fmt='table', filename="", row_number=False, column_width=None) + + def _format_single_config_json(self, configuration, facade, shared_folder): + """Formats a single configuration as JSON.""" + config_data = { + "uid": configuration.record_uid, + "name": configuration.title, + "config_type": configuration.record_type, + "shared_folder": { + "name": shared_folder.name if shared_folder else None, + "uid": shared_folder.shared_folder_uid if shared_folder else None + } if shared_folder else None, + "gateway_uid": facade.controller_uid, + "resource_record_uids": facade.resource_ref, + "fields": {} + } + + for field in configuration.fields: + if field.type in ('pamResources', 'fileRef'): + continue + + values = list(field.get_external_value()) + if not values: + continue + + field_name = field.external_name() + if field.type == 'schedule': + field_name = 'Default Schedule' + + config_data["fields"][field_name] = values + + return json.dumps(config_data, indent=2) + + def _format_single_config_table(self, configuration, facade, shared_folder): + """Formats a single configuration as a table.""" + table = [] + header = ['name', 'value'] + + table.append(['UID', configuration.record_uid]) + table.append(['Name', configuration.title]) + table.append(['Config Type', configuration.record_type]) + table.append(['Shared Folder', f'{shared_folder.name} ({shared_folder.shared_folder_uid})' if shared_folder else '']) + table.append(['Gateway UID', facade.controller_uid]) + table.append(['Resource Record UIDs', facade.resource_ref]) + + for field in configuration.fields: + if field.type in ('pamResources', 'fileRef'): + continue + + values = 
list(field.get_external_value()) + if not values: + continue + + field_name = field.external_name() + if field.type == 'schedule': + field_name = 'Default Schedule' + + table.append([field_name, values]) + + report_utils.dump_report_data(table, header, no_header=True, right_align=(0,)) + + +class PamConfigurationEditMixin(record_edit.RecordEditMixin): + pam_record_types = None + + def __init__(self): + super().__init__() + + @staticmethod + def get_pam_record_types(vault: vault_online.VaultOnline): + """Gets cached list of PAM record types.""" + if PamConfigurationEditMixin.pam_record_types is None: + rts = [x for x in vault.vault_data._custom_record_types if x.scope // 1000000 == record_pb2.RT_PAM] + PamConfigurationEditMixin.pam_record_types = [rt.id for rt in rts] + return PamConfigurationEditMixin.pam_record_types + + def parse_pam_configuration(self, vault: vault_online.VaultOnline, record: vault_record.TypedRecord, **kwargs): + """Parses PAM configuration fields: gateway, shared folder, and resource records.""" + field = self._get_or_create_pam_resources_field(record) + value = self._ensure_pam_resources_value(field) + + self._parse_gateway_uid(vault, value, kwargs) + self._parse_shared_folder_uid(vault, record, value, kwargs) + self._parse_resource_records(vault, value, kwargs) + + def _get_or_create_pam_resources_field(self, record: vault_record.TypedRecord): + """Gets or creates the pamResources field.""" + field = record.get_typed_field('pamResources') + if not field: + field = vault_record.TypedField.new_field('pamResources', {}) + record.fields.append(field) + return field + + def _ensure_pam_resources_value(self, field): + """Ensures the pamResources field has a value dictionary.""" + if len(field.value) == 0: + field.value.append({}) + return field.value[0] + + def _parse_gateway_uid(self, vault: vault_online.VaultOnline, value: dict, kwargs: dict): + """Resolves and sets the gateway UID from kwargs.""" + gateway = kwargs.get('gateway_uid') + if not gateway: + return + + gateways = gateway_utils.get_all_gateways(vault) + gateway_uid = next( + (utils.base64_url_encode(x.controllerUid) for x in gateways + if utils.base64_url_encode(x.controllerUid) == gateway + or x.controllerName.casefold() == gateway.casefold()), + None + ) + + if gateway_uid: + value['controllerUid'] = gateway_uid + + def _parse_shared_folder_uid(self, vault: vault_online.VaultOnline, record: vault_record.TypedRecord, + value: dict, kwargs: dict): + """Resolves and sets the shared folder UID from kwargs or existing record.""" + folder_name = kwargs.get('shared_folder_uid') + shared_folder_uid = None + + if folder_name: + shared_folder_uid = self._find_shared_folder_by_name_or_uid(vault, folder_name) + + if not shared_folder_uid: + shared_folder_uid = self._get_existing_shared_folder_uid(record) + + if shared_folder_uid: + value['folderUid'] = shared_folder_uid + else: + raise base.CommandError('Shared Folder not found') + + def _find_shared_folder_by_name_or_uid(self, vault: vault_online.VaultOnline, folder_name: str): + """Finds a shared folder by UID or name.""" + shared_folder_cache = vault.vault_data._shared_folders + + if folder_name in shared_folder_cache: + return folder_name + + for sf_uid in shared_folder_cache: + sf = vault.vault_data.load_shared_folder(sf_uid) + if sf and sf.name.casefold() == folder_name.casefold(): + return sf_uid + + return None + + def _get_existing_shared_folder_uid(self, record: vault_record.TypedRecord): + """Gets the existing shared folder UID from the record.""" + for f 
in record.fields: + if f.type == 'pamResources' and f.value and len(f.value) > 0: + return f.value[0].get('folderUid') + return None + + def _parse_resource_records(self, vault: vault_online.VaultOnline, value: dict, kwargs: dict): + """Removes resource records from the configuration.""" + remove_records = kwargs.get('remove_records') + if not remove_records: + return + + pam_record_lookup = self._build_pam_record_lookup(vault) + record_uids = set(value.get('resourceRef', [])) + + if isinstance(remove_records, list): + for r in remove_records: + record_uid = pam_record_lookup.get(r) or pam_record_lookup.get(r.lower()) + if record_uid: + record_uids.discard(record_uid) + else: + logger.warning(f'Failed to find PAM record: {r}') + + value['resourceRef'] = list(record_uids) + + def _build_pam_record_lookup(self, vault: vault_online.VaultOnline): + """Builds a lookup dictionary for PAM records by UID and title.""" + pam_record_lookup = {} + rti = PamConfigurationEditMixin.get_pam_record_types(vault) + + for r in vault.vault_data.records(): + if r.record_type in rti: + pam_record_lookup[r.record_uid] = r.record_uid + pam_record_lookup[r.title.lower()] = r.record_uid + + return pam_record_lookup + + @staticmethod + def resolve_single_record(vault: vault_online.VaultOnline, record_name: str, rec_type: str = ''): + """Resolves a single record by name and optional type.""" + for r in vault.vault_data.records(): + if r.title == record_name and (not rec_type or rec_type == r.record_type): + return r + return None + + def parse_properties(self, vault: vault_online.VaultOnline, record: vault_record.TypedRecord, **kwargs): + """Parses all configuration properties based on record type.""" + self.parse_pam_configuration(vault, record, **kwargs) + + extra_properties = [] + self._parse_common_properties(extra_properties, kwargs) + self._parse_type_specific_properties(vault, record, extra_properties, kwargs) + + if extra_properties: + parsed_fields = [record_edit.RecordEditMixin.parse_field(x) for x in extra_properties] + self.assign_typed_fields(record, parsed_fields) + + def _parse_common_properties(self, extra_properties: list, kwargs: dict): + """Parses properties common to all PAM configuration types.""" + port_mapping = kwargs.get('port_mapping') + if isinstance(port_mapping, list) and len(port_mapping) > 0: + pm = "\n".join(port_mapping) + extra_properties.append(f'multiline.portMapping={pm}') + + schedule = kwargs.get('default_schedule') + if schedule: + valid, err = validate_cron_expression(schedule, for_rotation=True) + if not valid: + raise base.CommandError(f'Invalid CRON "{schedule}" Error: {err}') + extra_properties.append(f'schedule.defaultRotationSchedule=$JSON:{{"type": "CRON", "cron": "{schedule}", "tz": "Etc/UTC"}}') + else: + extra_properties.append('schedule.defaultRotationSchedule=On-Demand') + + def _parse_type_specific_properties(self, vault: vault_online.VaultOnline, record: vault_record.TypedRecord, + extra_properties: list, kwargs: dict): + """Parses properties specific to each configuration type.""" + record_type = record.record_type + + if record_type == 'pamNetworkConfiguration': + self._parse_network_properties(extra_properties, kwargs) + elif record_type == 'pamAwsConfiguration': + self._parse_aws_properties(extra_properties, kwargs) + elif record_type == 'pamGcpConfiguration': + self._parse_gcp_properties(extra_properties, kwargs) + elif record_type == 'pamAzureConfiguration': + self._parse_azure_properties(extra_properties, kwargs) + elif record_type == 
'pamDomainConfiguration': + self._parse_domain_properties(vault, record, extra_properties, kwargs) + elif record_type == 'pamOciConfiguration': + self._parse_oci_properties(extra_properties, kwargs) + + def _parse_network_properties(self, extra_properties: list, kwargs: dict): + """Parses network configuration properties.""" + network_id = kwargs.get('network_id') + if network_id: + extra_properties.append(f'text.networkId={network_id}') + + network_cidr = kwargs.get('network_cidr') + if network_cidr: + extra_properties.append(f'text.networkCIDR={network_cidr}') + + def _parse_aws_properties(self, extra_properties: list, kwargs: dict): + """Parses AWS configuration properties.""" + aws_id = kwargs.get('aws_id') + if aws_id: + extra_properties.append(f'text.awsId={aws_id}') + + access_key_id = kwargs.get('access_key_id') + if access_key_id: + extra_properties.append(f'secret.accessKeyId={access_key_id}') + + access_secret_key = kwargs.get('access_secret_key') + if access_secret_key: + extra_properties.append(f'secret.accessSecretKey={access_secret_key}') + + region_names = kwargs.get('region_names') + if region_names: + regions = '\n'.join(region_names) + extra_properties.append(f'multiline.regionNames={regions}') + + def _parse_gcp_properties(self, extra_properties: list, kwargs: dict): + """Parses GCP configuration properties.""" + gcp_id = kwargs.get('gcp_id') + if gcp_id: + extra_properties.append(f'text.pamGcpId={gcp_id}') + + service_account_key = kwargs.get('service_account_key') + if service_account_key: + extra_properties.append(f'json.pamServiceAccountKey={service_account_key}') + + google_admin_email = kwargs.get('google_admin_email') + if google_admin_email: + extra_properties.append(f'email.pamGoogleAdminEmail={google_admin_email}') + + gcp_region = kwargs.get('region_names') + if gcp_region: + regions = '\n'.join(gcp_region) + extra_properties.append(f'multiline.pamGcpRegionName={regions}') + + def _parse_azure_properties(self, extra_properties: list, kwargs: dict): + """Parses Azure configuration properties.""" + azure_id = kwargs.get('azure_id') + if azure_id: + extra_properties.append(f'text.azureId={azure_id}') + + client_id = kwargs.get('client_id') + if client_id: + extra_properties.append(f'secret.clientId={client_id}') + + client_secret = kwargs.get('client_secret') + if client_secret: + extra_properties.append(f'secret.clientSecret={client_secret}') + + subscription_id = kwargs.get('subscription_id') + if subscription_id: + extra_properties.append(f'secret.subscriptionId={subscription_id}') + + tenant_id = kwargs.get('tenant_id') + if tenant_id: + extra_properties.append(f'secret.tenantId={tenant_id}') + + resource_groups = kwargs.get('resource_groups') + if isinstance(resource_groups, list) and len(resource_groups) > 0: + rg = '\n'.join(resource_groups) + extra_properties.append(f'multiline.resourceGroups={rg}') + + def _parse_domain_properties(self, vault: vault_online.VaultOnline, record: vault_record.TypedRecord, + extra_properties: list, kwargs: dict): + """Parses domain configuration properties.""" + domain_id = kwargs.get('domain_id') + if domain_id: + extra_properties.append(f'text.pamDomainId={domain_id}') + + self._parse_domain_hostname(extra_properties, kwargs) + self._parse_domain_ssl_settings(extra_properties, kwargs) + self._parse_domain_network_settings(extra_properties, kwargs) + self._parse_domain_admin_credential(vault, record, kwargs) + + def _parse_domain_hostname(self, extra_properties: list, kwargs: dict): + """Parses domain hostname and port 
settings.""" + host = str(kwargs.get('domain_hostname') or '').strip() + port = str(kwargs.get('domain_port') or '').strip() + if host or port: + val = json.dumps({"hostName": host, "port": port}) + extra_properties.append(f"f.pamHostname=$JSON:{val}") + + def _parse_domain_ssl_settings(self, extra_properties: list, kwargs: dict): + """Parses domain SSL and scan settings.""" + domain_use_ssl = utils.value_to_boolean(kwargs.get('domain_use_ssl')) + if domain_use_ssl is not None: + val = 'true' if domain_use_ssl else 'false' + extra_properties.append(f'checkbox.useSSL={val}') + + domain_scan_dc_cidr = utils.value_to_boolean(kwargs.get('domain_scan_dc_cidr')) + if domain_scan_dc_cidr is not None: + val = 'true' if domain_scan_dc_cidr else 'false' + extra_properties.append(f'checkbox.scanDCCIDR={val}') + + def _parse_domain_network_settings(self, extra_properties: list, kwargs: dict): + """Parses domain network CIDR settings.""" + domain_network_cidr = kwargs.get('domain_network_cidr') + if domain_network_cidr: + extra_properties.append(f'text.networkCIDR={domain_network_cidr}') + + def _parse_domain_admin_credential(self, vault: vault_online.VaultOnline, record: vault_record.TypedRecord, + kwargs: dict): + """Parses and validates domain administrative credential.""" + domain_administrative_credential = kwargs.get('domain_administrative_credential') + dac = str(domain_administrative_credential or '') + + if not dac: + return + + if kwargs.get('force_domain_admin', False) is True: + if not re.search('^[A-Za-z0-9-_]{22}$', dac): + logger.warning(f'Invalid Domain Admin User UID: "{dac}" (skipped)') + dac = '' + else: + adm_rec = PamConfigurationEditMixin.resolve_single_record(vault, dac, 'pamUser') + if adm_rec and isinstance(adm_rec, vault_record.TypedRecord) and adm_rec.record_type == 'pamUser': + dac = adm_rec.record_uid + else: + logger.warning(f'Domain Admin User UID: "{dac}" not found (skipped).') + dac = '' + + if dac: + prf = record.get_typed_field('pamResources') + prf.value = prf.value or [{}] + prf.value[0]["adminCredentialRef"] = dac + + def _parse_oci_properties(self, extra_properties: list, kwargs: dict): + """Parses OCI configuration properties.""" + oci_id = kwargs.get('oci_id') + if oci_id: + extra_properties.append(f'text.pamOciId={oci_id}') + + oci_admin_id = kwargs.get('oci_admin_id') + if oci_admin_id: + extra_properties.append(f'secret.adminOcid={oci_admin_id}') + + oci_admin_public_key = kwargs.get('oci_admin_public_key') + if oci_admin_public_key: + extra_properties.append(f'secret.adminPublicKey={oci_admin_public_key}') + + oci_admin_private_key = kwargs.get('oci_admin_private_key') + if oci_admin_private_key: + extra_properties.append(f'secret.adminPrivateKey={oci_admin_private_key}') + + oci_tenancy = kwargs.get('oci_tenancy') + if oci_tenancy: + extra_properties.append(f'text.tenancyOci={oci_tenancy}') + + oci_region = kwargs.get('oci_region') + if oci_region: + extra_properties.append(f'text.regionOci={oci_region}') + + def verify_required(self, record: vault_record.TypedRecord): + """Verifies and sets default values for required fields.""" + for field in record.fields: + if field.required and len(field.value) == 0: + if field.type == 'schedule': + field.value = [{'type': 'ON_DEMAND'}] + else: + self.warnings.append(f'Empty required field: "{field.external_name()}"') + + for custom in record.custom: + if custom.required: + custom.required = False + + +# Configuration type mapping +CONFIG_TYPE_TO_RECORD_TYPE = { + 'aws': 'pamAwsConfiguration', + 'azure': 
'pamAzureConfiguration', + 'local': 'pamNetworkConfiguration', + 'network': 'pamNetworkConfiguration', + 'gcp': 'pamGcpConfiguration', + 'domain': 'pamDomainConfiguration', + 'oci': 'pamOciConfiguration' +} + + +class PAMConfigNewCommand(base.ArgparseCommand, PamConfigurationEditMixin): + + def __init__(self): + self.choices = ['on', 'off', 'default'] + parser = argparse.ArgumentParser(prog='pam config new') + PAMConfigNewCommand.add_arguments_to_parser(parser) + super().__init__(parser) + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser): + choices = ['on', 'off', 'default'] + parser.add_argument('--config-type', '-ct', dest='config_type', action='store', + choices=['network', 'aws', 'azure'], help='PAM Configuration Type', ) + parser.add_argument('--title', '-t', dest='title', action='store', help='Title of the PAM Configuration') + parser.add_argument('--gateway', '-g', dest='gateway', action='store', help='Gateway UID or Name') + parser.add_argument('--shared-folder', '-sf', dest='shared_folder', action='store', + help='Share Folder where this PAM Configuration is stored. Should be one of the folders to ' + 'which the gateway has access to.') + parser.add_argument('--resource-record', '-rr', dest='resource_records', action='append', + help='Resource Record UID') + parser.add_argument('--schedule', '-sc', dest='default_schedule', action='store', help='Default Schedule: Use CRON syntax') + parser.add_argument('--port-mapping', '-pm', dest='port_mapping', action='append', help='Port Mapping') + network_group = parser.add_argument_group('network', 'Local network configuration') + network_group.add_argument('--network-id', dest='network_id', action='store', help='Network ID') + network_group.add_argument('--network-cidr', dest='network_cidr', action='store', help='Network CIDR') + aws_group = parser.add_argument_group('aws', 'AWS configuration') + aws_group.add_argument('--aws-id', dest='aws_id', action='store', help='AWS ID') + aws_group.add_argument('--access-key-id', dest='access_key_id', action='store', help='Access Key Id') + aws_group.add_argument('--access-secret-key', dest='access_secret_key', action='store', help='Access Secret Key') + aws_group.add_argument('--region-name', dest='region_names', action='append', help='Region Names') + azure_group = parser.add_argument_group('azure', 'Azure configuration') + azure_group.add_argument('--azure-id', dest='azure_id', action='store', help='Azure Id') + azure_group.add_argument('--client-id', dest='client_id', action='store', help='Client Id') + azure_group.add_argument('--client-secret', dest='client_secret', action='store', help='Client Secret') + azure_group.add_argument('--subscription_id', dest='subscription_id', action='store', + help='Subscription Id') + azure_group.add_argument('--tenant-id', dest='tenant_id', action='store', help='Tenant Id') + azure_group.add_argument('--resource-group', dest='resource_group', action='append', help='Resource Group') + + def execute(self, context: KeeperParams, **kwargs): + self.warnings.clear() + self._validate_vault(context) + + vault = context.vault + record_type = self._resolve_record_type(kwargs) + title = self._validate_title(kwargs) + + record = self._create_record(vault, record_type, title) + self._resolve_shared_folder_path(context, kwargs) + self.parse_properties(vault, record, **kwargs) + + gateway_uid, shared_folder_uid, admin_cred_ref = self._extract_pam_resources(record, kwargs) + self._validate_shared_folder(shared_folder_uid, kwargs) + 
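# A sketch (not from the diff itself) of the single dict stored in the 'pamResources'
# field, based only on the keys this module reads and writes (controllerUid, folderUid,
# resourceRef, adminCredentialRef); any other structure is not assumed:
#
#     {
#         'controllerUid': '<gateway UID>',
#         'folderUid': '<shared folder UID>',
#         'resourceRef': ['<PAM resource record UID>', ...],
#         'adminCredentialRef': '<pamUser record UID>',  # only set/read for domain configurations here
#     }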
self._warn_if_gateway_missing(gateway_uid, kwargs) + + self.verify_required(record) + self._create_and_configure_record(vault, record, shared_folder_uid, gateway_uid, admin_cred_ref, kwargs) + + self._log_warnings() + return record.record_uid + + def _validate_vault(self, context: KeeperParams): + """Validates that vault is initialized.""" + if not context.vault: + raise base.CommandError('Vault is not initialized. Login to initialize the vault.') + + def _resolve_record_type(self, kwargs: dict) -> str: + """Resolves the record type from config type parameter.""" + config_type = kwargs.get('config_type') + if not config_type: + raise base.CommandError('--config-type parameter is required') + + record_type = CONFIG_TYPE_TO_RECORD_TYPE.get(config_type) + if not record_type: + supported = ', '.join(CONFIG_TYPE_TO_RECORD_TYPE.keys()) + raise base.CommandError(f'--config-type {config_type} is not supported - supported options: {supported}') + + return record_type + + def _validate_title(self, kwargs: dict) -> str: + """Validates that title is provided.""" + title = kwargs.get('title') + if not title: + raise base.CommandError('--title parameter is required') + return title + + def _create_record(self, vault: vault_online.VaultOnline, record_type: str, title: str): + """Creates a new typed record with the specified type and title.""" + record = vault_record.TypedRecord(version=6) + record.type_name = record_type + record.title = title + + record_type_def = vault.vault_data.get_record_type_by_name(record_type) + if record_type_def and record_type_def.fields: + RecordEditMixin.adjust_typed_record_fields(record, record_type_def.fields) + + return record + + def _resolve_shared_folder_path(self, context: KeeperParams, kwargs: dict): + """Resolves shared folder path to UID.""" + sf_name = kwargs.get('shared_folder_uid', '') + if not sf_name: + return + + fpath = folder_utils.try_resolve_path(context, sf_name) + if fpath and len(fpath) >= 2 and fpath[-1] == '': + sfuid = fpath[-2].uid + if sfuid: + kwargs['shared_folder_uid'] = sfuid + + def _extract_pam_resources(self, record: vault_record.TypedRecord, kwargs: dict): + """Extracts gateway UID, shared folder UID, and admin credential ref from record.""" + field = record.get_typed_field('pamResources') + if not field: + raise base.CommandError('PAM configuration record does not contain resource field') + + value = field.get_default_value(dict) + if not value: + return None, None, None + + gateway_uid = value.get('controllerUid') + shared_folder_uid = value.get('folderUid') + admin_cred_ref = None + + if record.record_type == 'pamDomainConfiguration' and not kwargs.get('force_domain_admin', False): + admin_cred_ref = value.get('adminCredentialRef') + + return gateway_uid, shared_folder_uid, admin_cred_ref + + def _validate_shared_folder(self, shared_folder_uid: str, kwargs: dict): + """Validates that shared folder UID is present.""" + if not shared_folder_uid: + raise base.CommandError('--shared-folder parameter is required to create a PAM configuration') + + def _warn_if_gateway_missing(self, gateway_uid: str, kwargs: dict): + """Warns if gateway is not found.""" + if not gateway_uid: + gw_name = kwargs.get('gateway_uid') or '' + logger.warning(f'Gateway "{gw_name}" not found.') + + def _create_and_configure_record(self, vault: vault_online.VaultOnline, record: vault_record.TypedRecord, + shared_folder_uid: str, gateway_uid: str, admin_cred_ref: str, kwargs: dict): + """Creates the record and configures tunneling, DAG, and controller.""" + 
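# Order of operations below: create the version-6 configuration record, configure
# tunneling via the DAG, sync, move the record into the shared folder, sync again,
# and bind the gateway controller when one was resolved.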
config_utils.pam_configuration_create_record_v6(vault, record, shared_folder_uid) + + self._configure_tunneling(vault, record, admin_cred_ref, kwargs) + + vault.sync_down() + record_management.move_vault_objects(vault, [record.record_uid], shared_folder_uid) + vault.sync_down() + + if gateway_uid: + self._set_configuration_controller(vault, record.record_uid, gateway_uid) + + def _configure_tunneling(self, vault: vault_online.VaultOnline, record: vault_record.TypedRecord, + admin_cred_ref: str, kwargs: dict): + """Configures tunneling settings for the configuration.""" + encrypted_session_token, encrypted_transmission_key, _ = tunnel_utils.get_keeper_tokens(vault) + tmp_dag = TunnelDAG(vault, encrypted_session_token, encrypted_transmission_key, + record_uid=record.record_uid, is_config=True) + + tmp_dag.edit_tunneling_config( + kwargs.get('connections'), + kwargs.get('tunneling'), + kwargs.get('rotation'), + kwargs.get('recording'), + kwargs.get('typescriptrecording'), + kwargs.get('remotebrowserisolation') + ) + + if admin_cred_ref: + tmp_dag.link_user_to_config_with_options(admin_cred_ref, is_admin='on') + + tmp_dag.print_tunneling_config(record.record_uid, None) + + def _set_configuration_controller(self, vault: vault_online.VaultOnline, config_uid: str, gateway_uid: str): + """Sets the controller for the PAM configuration.""" + pcc = pam_pb2.PAMConfigurationController() + pcc.configurationUid = utils.base64_url_decode(config_uid) + pcc.controllerUid = utils.base64_url_decode(gateway_uid) + vault.keeper_auth.execute_auth_rest('pam/set_configuration_controller', pcc) + + def _log_warnings(self): + """Logs all warnings.""" + for w in self.warnings: + logger.warning(w) + + +class PAMConfigEditCommand(base.ArgparseCommand, PamConfigurationEditMixin): + + def __init__(self): + parser = argparse.ArgumentParser(prog='pam config edit') + PAMConfigEditCommand.add_arguments_to_parser(parser) + super().__init__(parser) + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser): + choices = ['on', 'off', 'default'] + parser.add_argument('--config-type', '-ct', dest='config_type', action='store', + choices=['network', 'aws', 'azure'], help='PAM Configuration Type', ) + parser.add_argument('--title', '-t', dest='title', action='store', help='Title of the PAM Configuration') + parser.add_argument('--gateway', '-g', dest='gateway', action='store', help='Gateway UID or Name') + parser.add_argument('--shared-folder', '-sf', dest='shared_folder', action='store', + help='Share Folder where this PAM Configuration is stored. 
Should be one of the folders to ' + 'which the gateway has access to.') + parser.add_argument('--resource-record', '-rr', dest='resource_records', action='append', + help='Resource Record UID') + parser.add_argument('--schedule', '-sc', dest='default_schedule', action='store', help='Default Schedule: Use CRON syntax') + parser.add_argument('--port-mapping', '-pm', dest='port_mapping', action='append', help='Port Mapping') + network_group = parser.add_argument_group('network', 'Local network configuration') + network_group.add_argument('--network-id', dest='network_id', action='store', help='Network ID') + network_group.add_argument('--network-cidr', dest='network_cidr', action='store', help='Network CIDR') + aws_group = parser.add_argument_group('aws', 'AWS configuration') + aws_group.add_argument('--aws-id', dest='aws_id', action='store', help='AWS ID') + aws_group.add_argument('--access-key-id', dest='access_key_id', action='store', help='Access Key Id') + aws_group.add_argument('--access-secret-key', dest='access_secret_key', action='store', help='Access Secret Key') + aws_group.add_argument('--region-name', dest='region_names', action='append', help='Region Names') + azure_group = parser.add_argument_group('azure', 'Azure configuration') + azure_group.add_argument('--azure-id', dest='azure_id', action='store', help='Azure Id') + azure_group.add_argument('--client-id', dest='client_id', action='store', help='Client Id') + azure_group.add_argument('--client-secret', dest='client_secret', action='store', help='Client Secret') + azure_group.add_argument('--subscription_id', dest='subscription_id', action='store', + help='Subscription Id') + azure_group.add_argument('--tenant-id', dest='tenant_id', action='store', help='Tenant Id') + azure_group.add_argument('--resource-group', dest='resource_group', action='append', help='Resource Group') + + def execute(self, context: KeeperParams, **kwargs): + self.warnings.clear() + self._validate_vault(context) + + vault = context.vault + configuration = self._find_configuration(vault, kwargs.get('config')) + self._validate_configuration(configuration, kwargs.get('config')) + + self._update_record_type_if_needed(vault, configuration, kwargs) + self._update_title_if_provided(configuration, kwargs) + + orig_gateway_uid, orig_shared_folder_uid = self._get_original_values(configuration) + self.parse_properties(vault, configuration, **kwargs) + self.verify_required(configuration) + + record_management.update_record(vault, configuration) + self._update_controller_and_folder_if_changed(vault, configuration, orig_gateway_uid, orig_shared_folder_uid) + + self._log_warnings() + vault.sync_down() + + def _validate_vault(self, context: KeeperParams): + """Validates that vault is initialized.""" + if not context.vault: + raise base.CommandError('Vault is not initialized. 
Login to initialize the vault.') + + def _find_configuration(self, vault: vault_online.VaultOnline, config_name: str): + """Finds a PAM configuration by UID or name.""" + if not config_name: + return None + + if config_name in vault.vault_data._records: + return vault.vault_data.load_record(config_name) + + config_name_lower = config_name.casefold() + for record in vault.vault_data.find_records(record_type=None, record_version=6): + if record.title.casefold() == config_name_lower: + return record + + return None + + def _validate_configuration(self, configuration, config_name: str): + """Validates that the configuration exists and is valid.""" + if not configuration: + raise base.CommandError(f'PAM configuration "{config_name}" not found') + + if not isinstance(configuration, vault_record.TypedRecord) or configuration.version != 6: + raise base.CommandError(f'PAM configuration "{config_name}" not found') + + def _update_record_type_if_needed(self, vault: vault_online.VaultOnline, configuration: vault_record.TypedRecord, + kwargs: dict): + """Updates the record type if config_type is provided and different.""" + config_type = kwargs.get('config_type') + if not config_type: + return + + record_type = CONFIG_TYPE_TO_RECORD_TYPE.get(config_type, configuration.record_type) + + if record_type != configuration.record_type: + configuration.type_name = record_type + record_type_def = vault.vault_data.get_record_type_by_name(record_type) + if record_type_def and record_type_def.fields: + RecordEditMixin.adjust_typed_record_fields(configuration, record_type_def.fields) + + def _update_title_if_provided(self, configuration: vault_record.TypedRecord, kwargs: dict): + """Updates the title if provided.""" + title = kwargs.get('title') + if title: + configuration.title = title + + def _get_original_values(self, configuration: vault_record.TypedRecord): + """Gets the original gateway and shared folder UIDs before updates.""" + field = configuration.get_typed_field('pamResources') + if not field: + raise base.CommandError('PAM configuration record does not contain resource field') + + value = field.get_default_value(dict) + if value: + return value.get('controllerUid') or '', value.get('folderUid') or '' + + return '', '' + + def _update_controller_and_folder_if_changed(self, vault: vault_online.VaultOnline, + configuration: vault_record.TypedRecord, + orig_gateway_uid: str, orig_shared_folder_uid: str): + """Updates controller and shared folder if they changed.""" + field = configuration.get_typed_field('pamResources') + value = field.get_default_value(dict) + if not value: + return + + gateway_uid = value.get('controllerUid') or '' + if gateway_uid != orig_gateway_uid: + self._set_configuration_controller(vault, configuration.record_uid, gateway_uid) + + shared_folder_uid = value.get('folderUid') or '' + if shared_folder_uid != orig_shared_folder_uid: + record_management.move_vault_objects(vault, [configuration.record_uid], shared_folder_uid) + + def _set_configuration_controller(self, vault: vault_online.VaultOnline, config_uid: str, gateway_uid: str): + """Sets the controller for the PAM configuration.""" + pcc = pam_pb2.PAMConfigurationController() + pcc.configurationUid = utils.base64_url_decode(config_uid) + pcc.controllerUid = utils.base64_url_decode(gateway_uid) + vault.keeper_auth.execute_auth_rest('pam/set_configuration_controller', pcc) + + def _log_warnings(self): + """Logs all warnings.""" + for w in self.warnings: + logger.warning(w) + + +class PAMConfigRemoveCommand(base.ArgparseCommand): + 
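# PAMConfigRemoveCommand resolves its target by record UID or, failing that, by
# case-insensitive title among version-6 PAM configuration records, then deletes the
# record and re-syncs the vault.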
+ def __init__(self): + parser = argparse.ArgumentParser(prog='pam config remove') + PAMConfigRemoveCommand.add_arguments_to_parser(parser) + super().__init__(parser) + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser): + parser.add_argument('--config', '-c', required=True, dest='pam_config', action='store', + help='PAM Configuration UID. To view all rotation settings with their UIDs, use command `pam config list`') + + def execute(self, context: KeeperParams, **kwargs): + self._validate_vault(context) + + vault = context.vault + pam_config_name = kwargs.get('pam_config') + pam_config_uid = self._find_configuration_uid(vault, pam_config_name) + + if not pam_config_uid: + raise base.CommandError(f'Configuration "{pam_config_name}" not found') + + record_management.delete_vault_objects(vault, [pam_config_uid]) + vault.sync_down() + + def _validate_vault(self, context: KeeperParams): + """Validates that vault is initialized.""" + if not context.vault: + raise base.CommandError('Vault is not initialized. Login to initialize the vault.') + + def _find_configuration_uid(self, vault: vault_online.VaultOnline, config_name: str) -> str: + """Finds a PAM configuration UID by UID or name.""" + if not config_name: + return None + + if config_name in vault.vault_data._records: + record = vault.vault_data.load_record(config_name) + if record and record.version == 6 and record.record_type in PAM_CONFIG_RECORD_TYPES: + return config_name + + config_name_lower = config_name.casefold() + for record in vault.vault_data.find_records(record_type=None, record_version=6): + if record.record_type in PAM_CONFIG_RECORD_TYPES: + if record.record_uid == config_name or record.title.casefold() == config_name_lower: + return record.record_uid + + return None + + +def validate_cron_field(field: str, min_val: int, max_val: int) -> bool: + # Accept *, single number, range, step, list, and L suffix for last day/week + pattern = r'^(\*|\d+L?|L[W]?|\d+-\d+|\*/\d+|\d+(,\d+)*|\d+-\d+/\d+)$' + if not re.match(pattern, field): + return False + + def is_valid_number(n: str) -> bool: + # Strip L and W suffix if present (for last day/week expressions) + n_stripped = n.rstrip('LW') + return n_stripped and n_stripped.isdigit() and min_val <= int(n_stripped) <= max_val + + parts = re.split(r'[,\-/]', field) + return all(part == '*' or part in ('L', 'LW') or is_valid_number(part) for part in parts if part != '*') + + +def validate_cron_expression(expr: str, for_rotation: bool = False) -> tuple[bool, str]: + parts = expr.strip().split() + + # All internal docs, MRD etc. specify that rotation schedule is using CRON format + # but actually back-end don't accept all valid standard CRON and uses unspecified custom CRON format + if for_rotation is True: + if len(parts) != 6: + return False, f"CRON: Rotation schedules require all 6 parts incl. seconds - ex. Daily at 04:00:00 cron: 0 0 4 * * ? got {len(parts)} parts" + if not(parts[3] == '?' or parts[5] == "?"): + logger.warning("CRON: Rotation schedule CRON format - must use ? character in one of these fields: day-of-week, day-of-month") + parts[3] = '*' if parts[3] == '?' else parts[3] + parts[5] = '*' if parts[5] == '?' else parts[5] + logger.debug("WARNING! 
Validating CRON expression for rotation - if you get 500 type errors make sure to validate your CRON using web vault UI") + + if len(parts) not in [5, 6]: + return False, f"CRON: Expected 5 or 6 fields, got {len(parts)}" + + if len(parts) == 6: + seconds, minute, hour, dom, month, dow = parts + if not validate_cron_field(seconds, 0, 59): + return False, "CRON: Invalid seconds field" + else: + minute, hour, dom, month, dow = parts + + validators = [ + (minute, 0, 59, "minute"), + (hour, 0, 23, "hour"), + (dom, 1, 31, "day of month"), + (month, 1, 12, "month"), + (dow, 0, 7, "day of week") + ] + + for field, min_val, max_val, name in validators: + if not validate_cron_field(field, min_val, max_val): + return False, f"CRON: Invalid {name} field" + + return True, "Valid cron expression" + diff --git a/keepercli-package/src/keepercli/commands/pedm_admin.py b/keepercli-package/src/keepercli/commands/pedm_admin.py index 576609a0..d354b9b4 100644 --- a/keepercli-package/src/keepercli/commands/pedm_admin.py +++ b/keepercli-package/src/keepercli/commands/pedm_admin.py @@ -18,6 +18,7 @@ from keepersdk.plugins.pedm import admin_plugin, pedm_shared, admin_types, admin_storage from keepersdk.plugins.pedm.pedm_shared import CollectionType from keepersdk.proto import NotificationCenter_pb2, pedm_pb2 +from keepersdk.vault import share_management_utils from . import base, pedm_aram from .. import prompt_utils, api from ..helpers import report_utils @@ -155,6 +156,20 @@ def resolve_existing_collections( found_collections[c.collection_uid] = c return list(found_collections.values()) + @staticmethod + def resolve_existing_approvals(pedm: admin_plugin.PedmPlugin, approval_names: Any) -> List[admin_types.PedmApproval]: + found_approvals: Dict[str, admin_types.PedmApproval] = {} + p: Optional[admin_types.PedmApproval] + if isinstance(approval_names, list): + for approval_name in approval_names: + a = pedm.approvals.get_entity(approval_name) + if a is None: + raise base.CommandError(f'Approval "{approval_name}" is not found') + found_approvals[a.approval_uid] = a + if len(found_approvals) == 0: + raise base.CommandError('No approvals were found') + return list(found_approvals.values()) + class PedmCommand(base.GroupCommand): def __init__(self): @@ -562,7 +577,7 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: verbose = kwargs.get('verbose') is True table = [] - headers = ['agent_uid', 'machine_name', 'deployment', 'disabled', 'created'] + headers = ['agent_uid', 'machine_name', 'deployment', 'disabled', 'created', 'last_seen'] active_agents: Set[str] = set() if verbose: headers.extend(('active', 'properties')) @@ -580,7 +595,8 @@ def execute(self, context: KeeperParams, **kwargs) -> Any: machine_name = '' if isinstance(agent.properties, dict): machine_name = agent.properties.get('MachineName') or '' - row: List[Any] = [agent.agent_uid, machine_name, deployment_name, agent.disabled, time_created] + row: List[Any] = [agent.agent_uid, machine_name, deployment_name, agent.disabled, time_created, + plugin.agent_last_seen(agent.agent_uid)] if verbose: row.append(agent.agent_uid in active_agents) props: Optional[List[str]] = None @@ -1688,6 +1704,7 @@ def __init__(self): super().__init__('Manage PEDM approval requests') self.register_command(PedmApprovalListCommand(), 'list', 'l') self.register_command(PedmApprovalStatusCommand(), 'action', 'a') + self.register_command(PedmApprovalExtendCommand(), 'extend') self.default_verb = 'list' @@ -1732,6 +1749,39 @@ def execute(self, context: KeeperParams, **kwargs) 
-> Any: return report_utils.dump_report_data(table, headers, fmt=fmt, filename=kwargs.get('output')) +class PedmApprovalExtendCommand(base.ArgparseCommand, PedmUtils): + def __init__(self): + parser = argparse.ArgumentParser(prog='extend', description='Extend PEDM approval request expiration time') + expiration = parser.add_mutually_exclusive_group() + expiration.add_argument('--expire-at', dest='expire_at', action='store', help='UTC datetime') + expiration.add_argument('--expire-in', dest='expire_in', action='store', + help='expiration period ([(mi)nutes|(h)ours|(d)ays|(mo)nths|(y)ears])') + parser.add_argument('approval', nargs='+', help='Approval UID or Name') + super().__init__(parser) + + def execute(self, context: KeeperParams, **kwargs) -> None: + plugin = context.pedm_plugin + logger = api.get_logger() + + share_expiration = share_management_utils.get_share_expiration(kwargs.get('expire_at'), kwargs.get('expire_in')) + if share_expiration > 0: + share_expiration = share_expiration - int(datetime.datetime.now().timestamp()) + if share_expiration < 100: + raise base.CommandError('Expiration time must be at least 1 minute') + expire_in = share_expiration // 60 + approvals = PedmUtils.resolve_existing_approvals(plugin, kwargs.get('approval')) + + to_extend: List[admin_types.PedmUpdateApproval] = [] + for approval in approvals: + to_extend.append(admin_types.PedmUpdateApproval(approval_uid=approval.approval_uid, expire_in=expire_in)) + status_rs = plugin.extend_approvals(to_extend=to_extend) + if status_rs.update: + for status in status_rs.update: + if not status.success: + if isinstance(status, admin_types.EntityStatus): + logger.warning(f'Failed to extend approval "{status.entity_uid}": {status.message}') + + class PedmApprovalStatusCommand(base.ArgparseCommand): def __init__(self): parser = argparse.ArgumentParser(prog='action', description='Modify PEDM approval requests') diff --git a/keepercli-package/src/keepercli/commands/secrets_manager.py b/keepercli-package/src/keepercli/commands/secrets_manager.py index 141c4586..f285d014 100644 --- a/keepercli-package/src/keepercli/commands/secrets_manager.py +++ b/keepercli-package/src/keepercli/commands/secrets_manager.py @@ -70,7 +70,7 @@ def add_arguments_to_parser(parser: argparse.ArgumentParser): '-f', '--force', dest='force', action='store_true', help='Force add or remove app' ) parser.add_argument( - '--email', action='store', type=str, dest='email', help='Email of user to grant / remove application access to / from' + '--email', '-e', action='store', type=str, dest='email', help='Email of user to grant / remove application access to / from' ) parser.add_argument( '--admin', action='store_true', help='Allow share recipient to manage application' @@ -366,7 +366,7 @@ def add_client( current_time_ms + first_access_expire_duration * MILLISECONDS_PER_MINUTE ) access_expire_in_ms = ( - access_expire_in_min * MILLISECONDS_PER_MINUTE + (current_time_ms + access_expire_in_min * MILLISECONDS_PER_MINUTE) if access_expire_in_min else None ) @@ -385,7 +385,8 @@ def add_client( first_access_expire_duration_ms=first_access_expire_duration_ms, access_expire_in_ms=access_expire_in_ms, master_key=master_key, - server=server + server=server, + client_type=GENERAL ) tokens.append(token_data['token_info']) diff --git a/keepercli-package/src/keepercli/commands/share_report.py b/keepercli-package/src/keepercli/commands/share_report.py new file mode 100644 index 00000000..9aafc6e1 --- /dev/null +++ b/keepercli-package/src/keepercli/commands/share_report.py 
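For orientation, the share-report command added below dispatches on its flags: --folders/-f builds a shared-folder report, --owner/-o a record-ownership report, --record/-r a per-record detail view, --email/-e a per-user view, and, with no filter, a summary grouped by share target. The --share-date help text quotes a representative invocation:

    share-report -v -o --share-date --format table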
@@ -0,0 +1,281 @@ +"""Share Report command for Keeper CLI.""" + +import argparse +from typing import Any, Optional + +from keepersdk.vault import share_report + +from . import base +from ..helpers import report_utils +from ..params import KeeperParams +from .. import api + + +class ShareReportCommand(base.ArgparseCommand): + """Command to generate share reports for records and shared folders.""" + + def __init__(self): + parser = argparse.ArgumentParser( + prog='share-report', + description='Generates a report of shared records', + parents=[base.report_output_parser] + ) + self.add_arguments_to_parser(parser) + super().__init__(parser) + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser) -> None: + """Add command arguments to the parser. + + Args: + parser: The argument parser to add arguments to + """ + parser.add_argument( + '-r', '--record', + dest='record', + action='append', + help='record name or UID (can be specified multiple times)' + ) + parser.add_argument( + '-e', '--email', + dest='user', + action='append', + help='user email or team name to filter by (can be specified multiple times)' + ) + parser.add_argument( + '-o', '--owner', + dest='owner', + action='store_true', + help='display record ownership information' + ) + parser.add_argument( + '--share-date', + dest='share_date', + action='store_true', + help='include date when the record was shared. This data is available only to ' + 'users with permissions to execute reports for their company. ' + 'Example: share-report -v -o --share-date --format table' + ) + parser.add_argument( + '-sf', '--shared-folders', + dest='shared_folders', + action='store_true', + help='display shared folder detail information' + ) + parser.add_argument( + '-v', '--verbose', + dest='verbose', + action='store_true', + help='display verbose information with detailed permissions' + ) + parser.add_argument( + '-f', '--folders', + dest='folders', + action='store_true', + default=False, + help='limit report to shared folders (excludes shared records)' + ) + parser.add_argument( + '-tu', '--show-team-users', + action='store_true', + help='show shared-folder team members (to be used with -f flag, ' + 'ignored for non-admin accounts)' + ) + parser.add_argument( + 'container', + nargs='*', + type=str, + action='store', + help='path(s) or UID(s) of container(s) by which to filter records' + ) + + def execute(self, context: KeeperParams, **kwargs) -> Any: + """Execute the share-report command.""" + base.require_login(context) + + if kwargs.get('share_date'): + base.require_enterprise_admin(context) + + if context.vault is None: + raise base.CommandError('Vault is not initialized. 
Login to initialize vault.') + + output_format = kwargs.get('format', 'table') + output_file = kwargs.get('output') + show_team_users = kwargs.get('show_team_users', False) + verbose = kwargs.get('verbose', False) or show_team_users + + config = share_report.ShareReportConfig( + record_filter=kwargs.get('record'), + user_filter=kwargs.get('user'), + container_filter=kwargs.get('container') or None, + show_ownership=kwargs.get('owner', False), + show_share_date=kwargs.get('share_date', False), + folders_only=kwargs.get('folders', False), + verbose=verbose, + show_team_users=show_team_users + ) + + enterprise = context.enterprise_data + generator = share_report.ShareReportGenerator( + vault=context.vault, + enterprise=enterprise, + auth=context.auth, + config=config + ) + + if config.folders_only: + return self._generate_folders_report(generator, output_format, output_file) + if config.show_ownership: + return self._generate_ownership_report(generator, output_format, output_file, verbose) + if config.record_filter: + return self._generate_record_detail_report(generator, config) + if config.user_filter: + return self._generate_user_shares_report(generator, config, output_format, output_file) + return self._generate_summary_report(generator, output_format, output_file) + + def _generate_folders_report( + self, + generator: share_report.ShareReportGenerator, + output_format: str, + output_file: Optional[str] + ) -> Optional[str]: + """Generate shared folders report.""" + entries = generator.generate_shared_folders_report() + headers = share_report.ShareReportGenerator.get_headers(folders_only=True) + table = [[e.folder_uid, e.folder_name, e.shared_to, e.permissions, e.folder_path] + for e in entries] + + return report_utils.dump_report_data( + table, + self._format_headers(headers, output_format), + fmt=output_format, + filename=output_file, + title='Shared folders' + ) + + def _generate_ownership_report( + self, + generator: share_report.ShareReportGenerator, + output_format: str, + output_file: Optional[str], + verbose: bool + ) -> Optional[str]: + """Generate record ownership report.""" + entries = generator.generate_records_report() + headers = share_report.ShareReportGenerator.get_headers(ownership=True) + table = [ + [e.record_owner, e.record_uid, e.record_title, + e.shared_with if verbose else e.shared_with_count, + '\n'.join(e.folder_paths)] + for e in entries + ] + + return report_utils.dump_report_data( + table, + self._format_headers(headers, output_format, exclude_json=True), + fmt=output_format, + filename=output_file, + sort_by=0, + row_number=True + ) + + def _generate_record_detail_report( + self, + generator: share_report.ShareReportGenerator, + config: share_report.ShareReportConfig + ) -> None: + """Generate detailed report for specific records (always verbose).""" + logger = api.get_logger() + + verbose_config = share_report.ShareReportConfig( + record_filter=config.record_filter, + user_filter=config.user_filter, + container_filter=config.container_filter, + show_ownership=config.show_ownership, + show_share_date=config.show_share_date, + folders_only=config.folders_only, + verbose=True, + show_team_users=config.show_team_users + ) + + verbose_generator = share_report.ShareReportGenerator( + vault=generator.vault, + enterprise=generator._enterprise, + auth=generator._auth, + config=verbose_config + ) + + entries = verbose_generator.generate_records_report() + if not entries: + logger.info('No records found matching the criteria.') + return + + for entry in entries: + 
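# Each matching record prints as a small right-aligned block: 'Record UID:' and
# 'Title:' rows followed by the multi-line 'Shared with:' list ('Not shared' when the
# record has no shares).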
logger.info('') + logger.info(f'{"Record UID:":>20} {entry.record_uid}') + logger.info(f'{"Title:":>20} {entry.record_title}') + self._log_shared_with(logger, entry.shared_with) + logger.info('') + + def _log_shared_with(self, logger, shared_with: str) -> None: + """Log shared with information.""" + if not shared_with: + logger.info(f'{"Shared with:":>20} Not shared') + return + + for i, line in enumerate(shared_with.split('\n')): + label = 'Shared with:' if i == 0 else '' + logger.info(f'{label:>20} {line}') + + def _generate_user_shares_report( + self, + generator: share_report.ShareReportGenerator, + config: share_report.ShareReportConfig, + output_format: str, + output_file: Optional[str] + ) -> Optional[str]: + """Generate report of shares filtered by user.""" + entries = generator.generate_records_report() + headers = ['username', 'record_owner', 'record_uid', 'record_title'] + table = [ + [user, e.record_owner, e.record_uid, e.record_title] + for e in entries + for user in (config.user_filter or []) + ] + + return report_utils.dump_report_data( + table, + self._format_headers(headers, output_format), + fmt=output_format, + filename=output_file, + group_by=0, + row_number=True + ) + + def _generate_summary_report( + self, + generator: share_report.ShareReportGenerator, + output_format: str, + output_file: Optional[str] + ) -> Optional[str]: + """Generate summary report of shares by target.""" + entries = generator.generate_summary_report() + headers = share_report.ShareReportGenerator.get_headers() + table = [[e.shared_to, e.record_count, e.shared_folder_count] for e in entries] + + return report_utils.dump_report_data( + table, + self._format_headers(headers, output_format), + fmt=output_format, + filename=output_file, + group_by=0, + row_number=True + ) + + @staticmethod + def _format_headers(headers: list, output_format: str, exclude_json: bool = False) -> list: + """Format headers based on output format.""" + if output_format == 'table' or (exclude_json and output_format != 'json'): + return [report_utils.field_to_title(h) for h in headers] + return headers + diff --git a/keepercli-package/src/keepercli/commands/shared_records_report.py b/keepercli-package/src/keepercli/commands/shared_records_report.py new file mode 100644 index 00000000..144543fd --- /dev/null +++ b/keepercli-package/src/keepercli/commands/shared_records_report.py @@ -0,0 +1,114 @@ +"""Shared Records Report command for Keeper CLI.""" + +import argparse +import os +from typing import Any, List, Optional + +from keepersdk.vault import shared_records_report + +from . import base +from ..helpers import report_utils +from ..params import KeeperParams +from .. import api + + +class SharedRecordsReportCommand(base.ArgparseCommand): + """Command to generate shared records reports for a logged-in user.""" + + def __init__(self): + parser = argparse.ArgumentParser( + prog='shared-records-report', + description='Report shared records for a logged-in user', + parents=[base.report_output_parser] + ) + SharedRecordsReportCommand.add_arguments_to_parser(parser) + super().__init__(parser) + + @staticmethod + def add_arguments_to_parser(parser: argparse.ArgumentParser) -> None: + """Add command-specific arguments to the parser.""" + parser.add_argument( + '-tu', '--show-team-users', + dest='show_team_users', + action='store_true', + help='show members of team for records shared via share team folders.' 
+ ) + parser.add_argument( + '--all-records', + dest='all_records', + action='store_true', + help='report on all records in the vault. only owned records are included if this argument is omitted.' + ) + parser.add_argument( + 'folder', + type=str, + nargs='*', + help='Optional (w/ multiple values allowed). Path or UID of folder containing the records to be shown' + ) + + def execute(self, context: KeeperParams, **kwargs) -> Any: + """Execute the shared-records-report command.""" + base.require_login(context) + base.require_enterprise_admin(context) + + if context.vault is None: + raise base.CommandError('Vault not initialized, login to initialize vault.') + + logger = api.get_logger() + output_format = kwargs.get('format', 'table') + output_file = kwargs.get('output') + show_team_users = kwargs.get('show_team_users', False) + all_records = kwargs.get('all_records', False) + folder_filter = kwargs.get('folder') or None + + # Log folder filter if provided + if folder_filter: + logger.info(f'Filtering by folder(s): {", ".join(folder_filter)}') + + config = shared_records_report.SharedRecordsReportConfig( + folder_filter=folder_filter, + show_team_users=show_team_users, + all_records=all_records + ) + + enterprise = getattr(context, 'enterprise_data', None) + generator = shared_records_report.SharedRecordsReportGenerator( + vault=context.vault, + enterprise=enterprise, + auth=context.auth, + config=config + ) + + rows: List[List[Any]] = list(generator.generate_report_rows()) + headers = shared_records_report.SharedRecordsReportGenerator.get_headers(all_records=all_records) + + if not rows: + logger.info('No shared records found matching the criteria.') + if output_format == 'json': + return report_utils.dump_report_data([], headers, fmt=output_format, filename=output_file) + return None + + if output_format != 'json': + headers = [report_utils.field_to_title(h) for h in headers] + + # Sort rows by title (index 2 if all_records, else index 1) + sort_index = 2 if all_records else 1 + rows.sort(key=lambda x: (x[sort_index] or '').lower() if len(x) > sort_index else '') + + result = report_utils.dump_report_data( + rows, + headers, + fmt=output_format, + filename=output_file, + row_number=True, + sort_by=(1, 3) if all_records else (0, 2) + ) + + if output_file: + _, ext = os.path.splitext(output_file) + if not ext: + output_file += '.json' if output_format == 'json' else '.csv' + logger.info(f'Report saved to: {os.path.abspath(output_file)}') + + return result + diff --git a/keepercli-package/src/keepercli/commands/shares.py b/keepercli-package/src/keepercli/commands/shares.py index 40cc05d0..53d160ab 100644 --- a/keepercli-package/src/keepercli/commands/shares.py +++ b/keepercli-package/src/keepercli/commands/shares.py @@ -154,6 +154,8 @@ def execute(self, context: KeeperParams, **kwargs) -> None: share_expiration = share_management_utils.get_share_expiration( kwargs.get('expire_at'), kwargs.get('expire_in') ) + + dry_run = kwargs.get('dry_run', False) request = RecordShares.prep_request( vault=vault, @@ -162,12 +164,12 @@ def execute(self, context: KeeperParams, **kwargs) -> None: emails=emails, share_expiration=share_expiration, action=action, - dry_run=kwargs.get('dry_run', False), + dry_run=dry_run, can_edit=kwargs.get('can_edit'), can_share=kwargs.get('can_share'), recursive=kwargs.get('recursive') ) - if request: + if request and not dry_run: success_responses, failed_responses = RecordShares.send_requests(vault, [request]) if success_responses: logger.info(f'{len(success_responses)} share 
requests were successfully processed') @@ -176,6 +178,10 @@ def execute(self, context: KeeperParams, **kwargs) -> None: for failed_response in failed_responses: logger.error(f'Failed to process share request: {failed_response}') vault.sync_down() + elif request and dry_run: + ShareRecordCommand._print_share_table(request, vault) + else: + logger.info('Nothing to do') def _validate_and_replace_contacts(self, vault, emails: list, force: bool) -> list: """Validate emails against known contacts and optionally replace with matches.""" @@ -222,7 +228,39 @@ def get_contact(user, contacts): return contact return None - + + @staticmethod + def _print_share_table(request: record_pb2.RecordShareUpdateRequest, vault: vault_online.VaultOnline) -> None: + headers = ['Username', 'Record UID', 'Title', 'Share Action', 'Expiration'] + table = [] + for attr in ['addSharedRecord', 'updateSharedRecord', 'removeSharedRecord']: + if hasattr(request, attr): + for obj in getattr(request, attr): + record_uid = utils.base64_url_encode(obj.recordUid) + username = obj.toUsername + record = vault.vault_data.get_record(record_uid=record_uid) + row = [username, record_uid, record.title] + if attr in ['addSharedRecord', 'updateSharedRecord']: + if obj.transfer: + row.append('Transfer Ownership') + elif obj.editable or obj.shareable: + if obj.editable and obj.shareable: + row.append('Can Edit & Share') + elif obj.editable: + row.append('Can Edit') + else: + row.append('Can Share') + else: + row.append('Read Only') + else: + row.append('Remove Share') + if obj.expiration > 0: + dt = datetime.datetime.fromtimestamp(obj.expiration//1000) + row.append(str(dt)) + else: + row.append(None) + table.append(row) + report_utils.dump_report_data(table, headers, row_number=True, group_by=0) class ShareFolderCommand(base.ArgparseCommand): def __init__(self): diff --git a/keepercli-package/src/keepercli/helpers/gateway_utils.py b/keepercli-package/src/keepercli/helpers/gateway_utils.py index be6194a9..7a462169 100644 --- a/keepercli-package/src/keepercli/helpers/gateway_utils.py +++ b/keepercli-package/src/keepercli/helpers/gateway_utils.py @@ -2,7 +2,7 @@ from typing import List from keepersdk.vault import vault_online -from keepersdk.proto import pam_pb2 +from keepersdk.proto import pam_pb2, enterprise_pb2 from keepersdk.vault import ksm_management @@ -69,7 +69,8 @@ def create_gateway( first_access_expire_duration_ms=first_access_expire_duration_ms, access_expire_in_ms=None, master_key=master_key, - server=vault.keeper_auth.keeper_endpoint.server + server=vault.keeper_auth.keeper_endpoint.server, + client_type=enterprise_pb2.DISCOVERY_AND_ROTATION_CONTROLLER ) return _extract_one_time_token(one_time_token_dict) diff --git a/keepercli-package/src/keepercli/register_commands.py b/keepercli-package/src/keepercli/register_commands.py index 07e28906..1e3facdb 100644 --- a/keepercli-package/src/keepercli/register_commands.py +++ b/keepercli-package/src/keepercli/register_commands.py @@ -29,7 +29,8 @@ def register_commands(commands: base.CliCommands, scopes: Optional[base.CommandS if not scopes or bool(scopes & base.CommandScope.Vault): from .commands import (vault_folder, vault, vault_record, record_edit, importer_commands, breachwatch, record_type, secrets_manager, shares, password_report, trash, record_file_report, - record_handling_commands, register, password_generate, verify_records) + record_handling_commands, register, password_generate, verify_records, + shared_records_report, share_report) commands.register_command('sync-down', 
vault.SyncDownCommand(), base.CommandScope.Vault, 'd') commands.register_command('cd', vault_folder.FolderCdCommand(), base.CommandScope.Vault) @@ -80,13 +81,15 @@ def register_commands(commands: base.CliCommands, scopes: Optional[base.CommandS commands.register_command('record-permission', record_handling_commands.RecordPermissionCommand(), base.CommandScope.Vault) commands.register_command('trash', trash.TrashCommand(), base.CommandScope.Vault) commands.register_command('verify-shared-folders', verify_records.VerifySharedFoldersCommand(), base.CommandScope.Vault) - commands.register_command('verify-records', verify_records.VerifyRecordsCommand(), base.CommandScope.Vault) + commands.register_command('verify-records', verify_records.VerifyRecordsCommand(), base.CommandScope.Vault) + commands.register_command('shared-records-report', shared_records_report.SharedRecordsReportCommand(), base.CommandScope.Vault) + commands.register_command('share-report', share_report.ShareReportCommand(), base.CommandScope.Vault) if not scopes or bool(scopes & base.CommandScope.Enterprise): from .commands import (enterprise_info, enterprise_node, enterprise_role, enterprise_team, enterprise_user, enterprise_create_user, importer_commands, audit_report, audit_alert, audit_log, transfer_account, pedm_admin, msp, user_report, - security_audit_report) + aging_report, action_report, security_audit_report) from .commands.pam import keeper_pam commands.register_command('create-user', enterprise_create_user.CreateEnterpriseUserCommand(), base.CommandScope.Enterprise, 'ecu') @@ -108,3 +111,6 @@ def register_commands(commands: base.CliCommands, scopes: Optional[base.CommandS commands.register_command('team-approve', enterprise_team.TeamApproveCommand(), base.CommandScope.Enterprise) commands.register_command('user-report', user_report.UserReportCommand(), base.CommandScope.Enterprise, 'ur') commands.register_command('security-audit-report', security_audit_report.SecurityAuditReportCommand(), base.CommandScope.Enterprise, 'sar') + commands.register_command('aging-report', aging_report.AgingReportCommand(), base.CommandScope.Enterprise, 'ar') + commands.register_command('action-report', action_report.ActionReportCommand(), base.CommandScope.Enterprise, 'acr') + commands.register_command('pam', keeper_pam.PAMControllerCommand(), base.CommandScope.Enterprise) diff --git a/keepersdk-package/requirements.txt b/keepersdk-package/requirements.txt index c767bfdf..04410975 100644 --- a/keepersdk-package/requirements.txt +++ b/keepersdk-package/requirements.txt @@ -5,3 +5,4 @@ protobuf>=5.28.3 websockets>=13.1 fido2>=2.0.0; python_version>='3.10' email-validator>=2.0.0 +pydantic>=2.6.4; python_version>='3.8' \ No newline at end of file diff --git a/keepersdk-package/src/keepersdk/authentication/configuration.py b/keepersdk-package/src/keepersdk/authentication/configuration.py index 1957555b..f4154e49 100644 --- a/keepersdk-package/src/keepersdk/authentication/configuration.py +++ b/keepersdk-package/src/keepersdk/authentication/configuration.py @@ -3,7 +3,7 @@ import json import os from typing import Type, Union, TypeVar, Optional, Generic, Iterator, List, Dict -from urllib.parse import urlparse +from urllib.parse import urlparse, urlsplit from .. 
import utils from ..constants import DEFAULT_KEEPER_SERVER @@ -15,6 +15,8 @@ def adjust_username(username: str) -> str: def adjust_servername(server: str) -> str: if server: + if '://' not in server: + server = '//' + server url = urlparse(server) if url.netloc: return url.netloc.lower() diff --git a/keepersdk-package/src/keepersdk/authentication/endpoint.py b/keepersdk-package/src/keepersdk/authentication/endpoint.py index 6f623a2f..f351b3e7 100644 --- a/keepersdk-package/src/keepersdk/authentication/endpoint.py +++ b/keepersdk-package/src/keepersdk/authentication/endpoint.py @@ -150,7 +150,7 @@ def execute_router_rest(self, endpoint: str, *, session_token: bytes, payload: O logger.debug('>>> [ROUTER] POST Request: [%s]', url) if payload is not None: payload = crypto.encrypt_aes_v2(payload, transmission_key) - response = requests.post(url, headers=headers, data=payload) + response = requests.post(url, headers=headers, data=payload, verify=get_certificate_check()) logger.debug('<<< [ROUTER] Response Code: [%d]', response.status_code) if response.status_code == 200: diff --git a/keepersdk-package/src/keepersdk/authentication/login_auth.py b/keepersdk-package/src/keepersdk/authentication/login_auth.py index c31c3ba4..2ef6cea6 100644 --- a/keepersdk-package/src/keepersdk/authentication/login_auth.py +++ b/keepersdk-package/src/keepersdk/authentication/login_auth.py @@ -189,8 +189,8 @@ class LoginContext: def __init__(self) -> None: self.username = '' self.passwords: List[str] = [] - self.clone_code = b'' - self.device_token = b'' + self.clone_code: Optional[bytes] = None + self.device_token: Optional[bytes] = None self.device_private_key: Optional[ec.EllipticCurvePrivateKey] = None self.message_session_uid: bytes = crypto.get_random_bytes(16) self.account_type: AccountAuthType = AccountAuthType.Regular @@ -248,6 +248,9 @@ def login(self, username: str, *passwords: str) -> None: try: _ensure_device_token_loaded(self) _start_login(self) + except errors.InvalidDeviceTokenError: + _ensure_device_token_loaded(self, new_device=True) + _start_login(self) except errors.RegionRedirectError as rr: _redirect_to_region(self, rr.region_host) _ensure_device_token_loaded(self) @@ -377,7 +380,7 @@ def _tfa_channel_info_keeper_to_sdk(channel_info: APIRequest_pb2.TwoFactorChanne return info -def _ensure_device_token_loaded(login: LoginAuth) -> None: +def _ensure_device_token_loaded(login: LoginAuth, *, new_device=False) -> None: logger = utils.get_logger() attempt = 0 @@ -385,6 +388,13 @@ def _ensure_device_token_loaded(login: LoginAuth) -> None: context.clone_code = b'' config = login.keeper_endpoint.get_configuration_storage().get() server = login.keeper_endpoint.server + if new_device: + context.device_token = None + context.device_private_key = None + all_devices = [x.device_token for x in config.devices().list()] + for device_token in all_devices: + config.devices().delete(device_token) + logger.info('Registering a new device') while attempt < 6: attempt += 1 diff --git a/keepersdk-package/src/keepersdk/authentication/notifications.py b/keepersdk-package/src/keepersdk/authentication/notifications.py index 7e3f03ea..8f997b61 100644 --- a/keepersdk-package/src/keepersdk/authentication/notifications.py +++ b/keepersdk-package/src/keepersdk/authentication/notifications.py @@ -117,6 +117,7 @@ async def main_loop(self) -> None: if url.startswith('wss://'): ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if not endpoint.get_certificate_check(): + ssl_context.check_hostname = False 
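+ # check_hostname must be cleared before verify_mode: ssl raises ValueError when switching to CERT_NONE while hostname checking is still enabled.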
ssl_context.verify_mode = ssl.CERT_NONE try: async with websockets.connect( diff --git a/keepersdk-package/src/keepersdk/enterprise/action_report.py b/keepersdk-package/src/keepersdk/enterprise/action_report.py new file mode 100644 index 00000000..cc5bd7db --- /dev/null +++ b/keepersdk-package/src/keepersdk/enterprise/action_report.py @@ -0,0 +1,416 @@ +"""Enterprise action report functionality for Keeper SDK.""" + +import dataclasses +import datetime +import logging +from collections import defaultdict, deque +from typing import Any, Dict, List, Optional, Set, Tuple + +from ..authentication import keeper_auth +from . import enterprise_types + +API_EVENT_SUMMARY_ROW_LIMIT = 2000 +DEFAULT_DAYS_SINCE = 30 +LOCKED_DEFAULT_DAYS = 90 + +logger = logging.getLogger(__name__) + + +class TargetUserStatus: + NO_LOGON = 'no-logon' + NO_UPDATE = 'no-update' + LOCKED = 'locked' + INVITED = 'invited' + NO_RECOVERY = 'no-recovery' + + +class AdminAction: + NONE = 'none' + LOCK = 'lock' + DELETE = 'delete' + TRANSFER = 'transfer' + + +STATUS_EVENT_TYPES: Dict[str, List[str]] = { + TargetUserStatus.NO_LOGON: ['login', 'login_console', 'chat_login', 'accept_invitation'], + TargetUserStatus.NO_UPDATE: ['record_add', 'record_update'], + TargetUserStatus.LOCKED: ['lock_user'], + TargetUserStatus.INVITED: ['send_invitation', 'auto_invite_user'], + TargetUserStatus.NO_RECOVERY: ['change_security_question', 'account_recovery_setup'], +} + + +@dataclasses.dataclass +class ActionReportEntry: + enterprise_user_id: int + email: str + full_name: str = '' + status: str = '' + transfer_status: str = '' + node_path: str = '' + roles: Optional[List[str]] = None + teams: Optional[List[str]] = None + tfa_enabled: bool = False + + +@dataclasses.dataclass +class ActionReportConfig: + target_user_status: str = TargetUserStatus.NO_LOGON + days_since: Optional[int] = None + node_name: Optional[str] = None + apply_action: str = AdminAction.NONE + target_user: Optional[str] = None + dry_run: bool = False + force: bool = False + + +@dataclasses.dataclass +class ActionResult: + action: str + status: str + affected_count: int = 0 + server_message: str = 'n/a' + + def to_text(self) -> str: + return (f'\tCOMMAND: {self.action}\n' + f'\tSTATUS: {self.status}\n' + f'\tSERVER MESSAGE: {self.server_message}\n' + f'\tAFFECTED: {self.affected_count}') + + +class ActionReportGenerator: + def __init__( + self, + enterprise_data: enterprise_types.IEnterpriseData, + auth: keeper_auth.KeeperAuth, + config: Optional[ActionReportConfig] = None + ) -> None: + self._enterprise_data = enterprise_data + self._auth = auth + self._config = config or ActionReportConfig() + self._user_teams: Optional[Dict[int, Set[str]]] = None + self._user_roles: Optional[Dict[int, Set[int]]] = None + self._team_roles: Optional[Dict[str, Set[int]]] = None + self._node_children: Optional[Dict[int, Set[int]]] = None + + @property + def enterprise_data(self) -> enterprise_types.IEnterpriseData: + return self._enterprise_data + + @property + def config(self) -> ActionReportConfig: + return self._config + + def _get_days_since(self) -> int: + if self._config.days_since is not None: + return self._config.days_since + if self._config.target_user_status == TargetUserStatus.LOCKED: + return LOCKED_DEFAULT_DAYS + return DEFAULT_DAYS_SINCE + + def _build_user_teams_lookup(self) -> Dict[int, Set[str]]: + if self._user_teams is not None: + return self._user_teams + + self._user_teams = defaultdict(set) + for team_user in self._enterprise_data.team_users.get_all_links(): + 
self._user_teams[team_user.enterprise_user_id].add(team_user.team_uid) + return self._user_teams + + def _build_user_roles_lookup(self) -> Dict[int, Set[int]]: + if self._user_roles is not None: + return self._user_roles + + self._user_roles = defaultdict(set) + for role_user in self._enterprise_data.role_users.get_all_links(): + self._user_roles[role_user.enterprise_user_id].add(role_user.role_id) + return self._user_roles + + def _build_team_roles_lookup(self) -> Dict[str, Set[int]]: + if self._team_roles is not None: + return self._team_roles + + self._team_roles = defaultdict(set) + for role_team in self._enterprise_data.role_teams.get_all_links(): + self._team_roles[role_team.team_uid].add(role_team.role_id) + return self._team_roles + + def _build_node_children_lookup(self) -> Dict[int, Set[int]]: + if self._node_children is not None: + return self._node_children + + self._node_children = defaultdict(set) + for node in self._enterprise_data.nodes.get_all_entities(): + if node.parent_id: + self._node_children[node.parent_id].add(node.node_id) + return self._node_children + + def _get_descendant_nodes(self, node_id: int) -> Set[int]: + children_lookup = self._build_node_children_lookup() + descendants = {node_id} + queue = deque([node_id]) + + while queue: + current_id = queue.popleft() + child_ids = children_lookup.get(current_id, set()) + for child_id in child_ids: + if child_id not in descendants: + descendants.add(child_id) + queue.append(child_id) + + return descendants + + def _resolve_node(self, node_name: str) -> Optional[enterprise_types.Node]: + if not node_name: + return None + + if node_name.isnumeric(): + node = self._enterprise_data.nodes.get_entity(int(node_name)) + if node: + return node + + node_name_lower = node_name.lower() + if node_name_lower == self._enterprise_data.enterprise_info.enterprise_name.lower(): + return self._enterprise_data.root_node + + matching_nodes = [ + n for n in self._enterprise_data.nodes.get_all_entities() + if n.name and n.name.lower() == node_name_lower + ] + + if len(matching_nodes) == 1: + return matching_nodes[0] + elif len(matching_nodes) > 1: + logger.warning(f'More than one node "{node_name}" found. 
Use Node ID.') + return None + + return None + + def _get_user_role_ids(self, user_id: int) -> Set[int]: + user_roles = self._build_user_roles_lookup() + user_teams = self._build_user_teams_lookup() + team_roles = self._build_team_roles_lookup() + + role_ids = set(user_roles.get(user_id, set())) + for team_uid in user_teams.get(user_id, set()): + role_ids.update(team_roles.get(team_uid, set())) + + return role_ids + + def _get_user_team_names(self, user_id: int) -> List[str]: + user_teams = self._build_user_teams_lookup() + team_names = [] + for team_uid in user_teams.get(user_id, set()): + team = self._enterprise_data.teams.get_entity(team_uid) + if team: + team_names.append(team.name) + return sorted(team_names, key=str.lower) + + def _get_user_role_names(self, user_id: int) -> List[str]: + role_names = [] + for role_id in self._get_user_role_ids(user_id): + role = self._enterprise_data.roles.get_entity(role_id) + if role: + role_names.append(role.name) + return sorted(role_names, key=str.lower) + + @staticmethod + def get_node_path( + enterprise_data: enterprise_types.IEnterpriseData, + node_id: int, + omit_root: bool = False + ) -> str: + nodes: List[str] = [] + n_id = node_id + while isinstance(n_id, int) and n_id > 0: + node = enterprise_data.nodes.get_entity(n_id) + if not node: + break + n_id = node.parent_id or 0 + if not omit_root or n_id > 0: + node_name = node.name + if not node_name and node.node_id == enterprise_data.root_node.node_id: + node_name = enterprise_data.enterprise_info.enterprise_name + nodes.append(node_name) + nodes.reverse() + return '\\'.join(nodes) + + @staticmethod + def get_user_status_text(user: enterprise_types.User) -> str: + if user.status == 'invited': + return 'Invited' + if user.lock > 0: + return 'Locked' if user.lock == 1 else 'Disabled' + return 'Active' + + @staticmethod + def get_user_transfer_status_text(user: enterprise_types.User) -> str: + transfer_status = user.transfer_acceptance_status + if transfer_status is not None: + status_map = {1: '', 2: 'Not accepted', 3: 'Partially accepted', 4: 'Transfer accepted'} + if transfer_status in status_map: + return status_map[transfer_status] + + if isinstance(user.account_share_expiration, int) and user.account_share_expiration > 0: + expire_at = datetime.datetime.fromtimestamp(user.account_share_expiration / 1000.0) + if expire_at < datetime.datetime.now(): + return 'Blocked' + return 'Pending Transfer' + return '' + + def _get_users_by_status(self) -> Tuple[List[enterprise_types.User], List[enterprise_types.User], List[enterprise_types.User]]: + active = [] + locked = [] + invited = [] + + for user in self._enterprise_data.users.get_all_entities(): + if user.status == 'invited': + invited.append(user) + elif user.status == 'active': + if user.lock > 0: + locked.append(user) + else: + active.append(user) + + return active, locked, invited + + def _filter_users_by_node(self, users: List[enterprise_types.User]) -> List[enterprise_types.User]: + node_name = self._config.node_name + if not node_name: + return users + + node = self._resolve_node(node_name) + if not node: + logger.warning(f'Node "{node_name}" not found') + return [] + + target_nodes = self._get_descendant_nodes(node.node_id) + return [u for u in users if u.node_id in target_nodes] + + def _query_users_with_events( + self, + usernames: Set[str], + event_types: List[str], + days_since: int, + username_field: str = 'username' + ) -> Set[str]: + if not usernames: + return set() + + now = datetime.datetime.now(tz=datetime.timezone.utc) + 
min_dt = now - datetime.timedelta(days=days_since) + + users_with_events: Set[str] = set() + limit = API_EVENT_SUMMARY_ROW_LIMIT + username_list = list(usernames) + + while username_list: + batch = username_list[:limit] + username_list = username_list[limit:] + + report_filter: Dict[str, Any] = { + 'audit_event_type': event_types, + 'created': {'min': int(min_dt.timestamp())}, + username_field: batch + } + + rq = { + 'command': 'get_audit_event_reports', + 'report_type': 'span', + 'scope': 'enterprise', + 'aggregate': ['last_created'], + 'columns': [username_field], + 'filter': report_filter, + 'limit': limit + } + + try: + rs = self._auth.execute_auth_command(rq) + for event in rs.get('audit_event_overview_report_rows', []): + username = event.get(username_field, '').lower() + if username: + users_with_events.add(username) + except Exception as e: + logger.debug(f'Error querying audit events: {e}') + + return users_with_events + + def _get_target_users(self) -> List[enterprise_types.User]: + active, locked, invited = self._get_users_by_status() + active = self._filter_users_by_node(active) + locked = self._filter_users_by_node(locked) + invited = self._filter_users_by_node(invited) + + target_status = self._config.target_user_status + days_since = self._get_days_since() + + if target_status == TargetUserStatus.NO_LOGON: + candidates = active + event_types = STATUS_EVENT_TYPES[TargetUserStatus.NO_LOGON] + username_field = 'username' + elif target_status == TargetUserStatus.NO_UPDATE: + candidates = active + event_types = STATUS_EVENT_TYPES[TargetUserStatus.NO_UPDATE] + username_field = 'username' + elif target_status == TargetUserStatus.LOCKED: + candidates = locked + event_types = STATUS_EVENT_TYPES[TargetUserStatus.LOCKED] + username_field = 'to_username' + elif target_status == TargetUserStatus.INVITED: + candidates = invited + event_types = STATUS_EVENT_TYPES[TargetUserStatus.INVITED] + username_field = 'email' + elif target_status == TargetUserStatus.NO_RECOVERY: + candidates = active + event_types = STATUS_EVENT_TYPES[TargetUserStatus.NO_RECOVERY] + username_field = 'username' + else: + logger.warning(f'Invalid target_user_status: {target_status}') + return [] + + if not candidates: + return [] + + candidate_usernames = {u.username.lower() for u in candidates} + users_with_actions = self._query_users_with_events( + candidate_usernames, event_types, days_since, username_field + ) + return [u for u in candidates if u.username.lower() not in users_with_actions] + + def generate_report(self) -> List[ActionReportEntry]: + target_users = self._get_target_users() + + report_entries: List[ActionReportEntry] = [] + + for user in target_users: + entry = ActionReportEntry( + enterprise_user_id=user.enterprise_user_id, + email=user.username, + full_name=user.full_name or '', + status=self.get_user_status_text(user), + transfer_status=self.get_user_transfer_status_text(user), + node_path=self.get_node_path(self._enterprise_data, user.node_id, omit_root=False), + tfa_enabled=user.tfa_enabled + ) + + entry.roles = self._get_user_role_names(user.enterprise_user_id) + entry.teams = self._get_user_team_names(user.enterprise_user_id) + + report_entries.append(entry) + + report_entries.sort(key=lambda x: x.email.lower()) + return report_entries + + @staticmethod + def get_allowed_actions(target_status: str) -> Set[str]: + default_allowed = {AdminAction.NONE} + + status_actions = { + TargetUserStatus.NO_LOGON: {*default_allowed, AdminAction.LOCK}, + TargetUserStatus.NO_UPDATE: default_allowed, + 
TargetUserStatus.LOCKED: {*default_allowed, AdminAction.DELETE, AdminAction.TRANSFER}, + TargetUserStatus.INVITED: {*default_allowed, AdminAction.DELETE}, + TargetUserStatus.NO_RECOVERY: default_allowed, + } + + return status_actions.get(target_status, default_allowed) diff --git a/keepersdk-package/src/keepersdk/enterprise/aging_report.py b/keepersdk-package/src/keepersdk/enterprise/aging_report.py new file mode 100644 index 00000000..7c42bf9d --- /dev/null +++ b/keepersdk-package/src/keepersdk/enterprise/aging_report.py @@ -0,0 +1,557 @@ +"""Enterprise password aging report functionality for Keeper SDK.""" + +import dataclasses +import datetime +import json +import logging +import os +import traceback +from typing import Optional, List, Dict, Any, Iterable, Tuple + +from ..authentication import keeper_auth +from ..proto import enterprise_pb2 +from .. import crypto, utils +from . import enterprise_types +from ..vault import vault_online + + +API_EVENT_SUMMARY_ROW_LIMIT = 1000 +DEFAULT_PERIOD_DAYS = 90 +SEARCH_HISTORY_YEARS = 5 +MAX_PAGINATION_ITERATIONS = 10000 # Safety limit to prevent infinite loops +logger = logging.getLogger(__name__) + + +@dataclasses.dataclass +class AgingReportEntry: + """Represents a single record entry in the aging report.""" + record_uid: str + owner_email: str + title: str = '' + last_changed: Optional[datetime.datetime] = None + record_created: Optional[datetime.datetime] = None + shared: bool = False + record_url: str = '' + shared_folder_uid: Optional[List[str]] = None + in_trash: bool = False + + +@dataclasses.dataclass +class AgingReportConfig: + """Configuration for aging report generation.""" + period_days: int = DEFAULT_PERIOD_DAYS + cutoff_date: Optional[datetime.datetime] = None + username: Optional[str] = None + exclude_deleted: bool = False + in_shared_folder: bool = False + rebuild: bool = False + delete_cache: bool = False + no_cache: bool = False + server: str = 'keepersecurity.com' + + +class AgingReportGenerator: + """Generates password aging reports for enterprise records. + + This class identifies records whose passwords have not been changed + within a specified period. It fetches data from the compliance API + and falls back to audit events if needed. 
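+ Record creation and password-change timestamps are then back-filled from audit event span reports before the cutoff comparison is applied.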
+ + Usage: + generator = AgingReportGenerator(enterprise_data, auth, config) + entries = generator.generate_report() + """ + + def __init__( + self, + enterprise_data: enterprise_types.IEnterpriseData, + auth: keeper_auth.KeeperAuth, + config: Optional[AgingReportConfig] = None, + vault: Optional[vault_online.VaultOnline] = None + ) -> None: + self._enterprise_data = enterprise_data + self._auth = auth + self._config = config or AgingReportConfig() + self._vault = vault + self._email_to_user_id: Optional[Dict[str, int]] = None + self._user_id_to_email: Optional[Dict[int, str]] = None + self._records: Dict[str, Dict[str, Any]] = {} + self._record_shared_folders: Dict[str, List[str]] = {} + + @property + def enterprise_data(self) -> enterprise_types.IEnterpriseData: + return self._enterprise_data + + @property + def config(self) -> AgingReportConfig: + return self._config + + def get_cache_file_path(self, enterprise_id: int) -> str: + """Get the path to the local cache database file.""" + home_dir = os.path.expanduser('~') + cache_dir = os.path.join(home_dir, '.keeper') + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + return os.path.join(cache_dir, f'aging_cache_{enterprise_id}.db') + + def delete_local_cache(self, enterprise_id: int) -> bool: + """Delete the local database cache file.""" + cache_file = self.get_cache_file_path(enterprise_id) + if os.path.isfile(cache_file): + os.remove(cache_file) + return True + return False + + def _get_cutoff_timestamp(self) -> int: + """Get the cutoff timestamp based on config.""" + if self._config.cutoff_date: + return int(self._config.cutoff_date.timestamp()) + else: + now = datetime.datetime.now() + cutoff = now - datetime.timedelta(days=self._config.period_days) + return int(cutoff.timestamp()) + + def _build_user_lookups(self) -> None: + """Build lookups between email and enterprise_user_id.""" + if self._email_to_user_id is None: + self._email_to_user_id = {} + self._user_id_to_email = {} + for user in self._enterprise_data.users.get_all_entities(): + email = user.username.lower() + user_id = user.enterprise_user_id + self._email_to_user_id[email] = user_id + self._user_id_to_email[user_id] = email + + def _get_target_username(self) -> Optional[str]: + """Get normalized target username for filtering.""" + if not self._config.username: + return None + return self._config.username.lower() + + def _get_search_min_timestamp(self) -> int: + """Get minimum timestamp for historical searches.""" + return int((datetime.datetime.now() - datetime.timedelta(days=365 * SEARCH_HISTORY_YEARS)).timestamp()) + + def _fetch_paginated_audit_events( + self, + event_types: List[str], + report_type: str = 'raw' + ) -> Iterable[Dict[str, Any]]: + """Fetch paginated audit events. Yields individual events.""" + search_min_ts = self._get_search_min_timestamp() + audit_filter: Dict[str, Any] = { + 'audit_event_type': event_types, + 'created': {'min': search_min_ts} + } + + rq: Dict[str, Any] = { + 'command': 'get_audit_event_reports', + 'scope': 'enterprise', + 'report_type': report_type, + 'filter': audit_filter, + 'limit': API_EVENT_SUMMARY_ROW_LIMIT, + 'order': 'ascending' + } + + if report_type == 'span': + rq['columns'] = ['record_uid', 'audit_event_type'] + rq['aggregate'] = ['last_created'] + del rq['order'] + + last_ts = search_min_ts + iteration_count = 0 + + while True: + iteration_count += 1 + if iteration_count > MAX_PAGINATION_ITERATIONS: + logger.warning(f'Reached maximum pagination iterations ({MAX_PAGINATION_ITERATIONS}). 
Stopping to prevent infinite loop.') + break + + try: + rs = self._auth.execute_auth_command(rq) + events = rs.get('audit_event_overview_report_rows', []) + + if not events: + break + + for event in events: + yield event + ts_field = 'last_created' if report_type == 'span' else 'created' + last_ts = max(last_ts, int(event.get(ts_field, 0))) + + if len(events) < API_EVENT_SUMMARY_ROW_LIMIT: + break + + if report_type == 'span': + audit_filter['created']['max'] = last_ts + 1 + else: + audit_filter['created'] = {'min': last_ts} + except Exception as e: + logger.debug(f'Error fetching audit events: {e}') + break + + def get_record_sfs(self, record_uid: str) -> List[str]: + """Get list of shared folder UIDs where the record exists.""" + return self._record_shared_folders.get(record_uid, []) + + def _get_ec_private_key(self) -> Optional[bytes]: + """Get the enterprise EC private key for decryption.""" + return self._enterprise_data.enterprise_info.ec_private_key + + def _decrypt_record_data(self, encrypted_data: bytes) -> Dict[str, Any]: + """Decrypt record data using EC private key.""" + if not encrypted_data: + return {} + + ec_key = self._get_ec_private_key() + if ec_key is None: + return {} + + try: + data_json = crypto.decrypt_ec(encrypted_data, ec_key) + return json.loads(data_json.decode('utf-8')) + except Exception as e: + logger.debug(f'Failed to decrypt record data: {e}') + return {} + + def _fetch_compliance_data(self, user_ids: Optional[List[int]] = None) -> None: + """Fetch record data from compliance API.""" + if user_ids is None: + user_ids = [u.enterprise_user_id for u in self._enterprise_data.users.get_all_entities()] + + if not user_ids: + logger.warning('No enterprise users found') + return + + rq = enterprise_pb2.PreliminaryComplianceDataRequest() + rq.includeNonShared = True + rq.includeTotalMatchingRecordsInFirstResponse = True + for uid in user_ids: + rq.enterpriseUserIds.append(uid) + + has_more = True + continuation_token = None + + while has_more: + if continuation_token: + rq.continuationToken = continuation_token + + try: + rs = self._auth.execute_auth_rest( + 'enterprise/get_preliminary_compliance_data', + rq, + response_type=enterprise_pb2.PreliminaryComplianceDataResponse + ) + + for user_data in rs.auditUserData: + user_id = user_data.enterpriseUserId + owner_email = self._user_id_to_email.get(user_id, '') + + for record in user_data.auditUserRecords: + record_uid = utils.base64_url_encode(record.recordUid) + record_data = self._decrypt_record_data(record.encryptedData) + + self._records[record_uid] = { + 'record_uid': record_uid, + 'owner_email': owner_email, + 'owner_user_id': user_id, + 'title': record_data.get('title', ''), + 'shared': record.shared, + 'created_ts': 0, + 'pw_changed_ts': 0, + 'in_trash': record_data.get('in_trash', False) + } + + has_more = rs.hasMore and rs.continuationToken + if has_more: + continuation_token = rs.continuationToken + + except Exception as e: + logger.warning(f'Error fetching compliance data: {e}') + logger.debug(traceback.format_exc()) + break + + logger.debug(f'Fetched {len(self._records)} records from compliance API') + + def _update_timestamps_from_audit_events(self) -> None: + """Update records with timestamps from audit events using span reports.""" + created_lookup: Dict[str, int] = {} + folder_add_lookup: Dict[str, int] = {} + pw_change_lookup: Dict[str, int] = {} + + event_types = ['record_add', 'record_password_change', 'folder_add_record'] + for event in self._fetch_paginated_audit_events(event_types, 
report_type='span'): + record_uid = event.get('record_uid', '') + if not record_uid or record_uid not in self._records: + continue + + event_type = event.get('audit_event_type', '') + event_ts = int(event.get('last_created', 0)) + + if event_type == 'record_add': + created_lookup.setdefault(record_uid, event_ts) + elif event_type == 'folder_add_record': + folder_add_lookup[record_uid] = event_ts + elif event_type == 'record_password_change': + if event_ts > pw_change_lookup.get(record_uid, 0): + pw_change_lookup[record_uid] = event_ts + + for record_uid, ts in folder_add_lookup.items(): + created_lookup.setdefault(record_uid, ts) + + if self._config.in_shared_folder: + self._fetch_shared_folder_mappings() + + if self._config.exclude_deleted: + self._fetch_deleted_records() + + for record_uid, ts in created_lookup.items(): + if record_uid in self._records: + rec = self._records[record_uid] + if rec['created_ts'] == 0 or ts < rec['created_ts']: + rec['created_ts'] = ts + + for record_uid, ts in pw_change_lookup.items(): + if record_uid in self._records: + self._records[record_uid]['pw_changed_ts'] = ts + + def _fetch_deleted_records(self) -> None: + """Fetch deleted records from audit events for --exclude-deleted filtering.""" + for event in self._fetch_paginated_audit_events(['record_delete']): + record_uid = event.get('record_uid', '') + if record_uid and record_uid in self._records: + self._records[record_uid]['in_trash'] = True + + def _fetch_shared_folder_mappings(self) -> None: + """Fetch shared folder mappings for --in-shared-folder filtering.""" + for event in self._fetch_paginated_audit_events(['folder_add_record']): + record_uid = event.get('record_uid', '') + shared_folder_uid = event.get('shared_folder_uid', '') + + if record_uid and shared_folder_uid: + if record_uid not in self._record_shared_folders: + self._record_shared_folders[record_uid] = [] + if shared_folder_uid not in self._record_shared_folders[record_uid]: + self._record_shared_folders[record_uid].append(shared_folder_uid) + + def _fetch_records_from_audit_events(self) -> None: + """Fallback: Fetch records from audit events if compliance API fails.""" + for event in self._fetch_paginated_audit_events(['record_add', 'folder_add_record']): + record_uid = event.get('record_uid', '') + if not record_uid: + continue + + event_ts = int(event.get('created', 0)) + username = event.get('username', '') + event_type = event.get('audit_event_type', '') + shared_folder_uid = event.get('shared_folder_uid', '') + + if record_uid not in self._records: + self._records[record_uid] = { + 'record_uid': record_uid, + 'owner_email': username, + 'owner_user_id': 0, + 'title': '', + 'shared': bool(shared_folder_uid), + 'created_ts': event_ts if event_type == 'record_add' else 0, + 'pw_changed_ts': 0, + 'in_trash': False + } + else: + rec = self._records[record_uid] + if event_type == 'record_add' and event_ts > 0: + if rec['created_ts'] == 0 or event_ts < rec['created_ts']: + rec['created_ts'] = event_ts + if username: + rec['owner_email'] = username + if shared_folder_uid: + rec['shared'] = True + if rec['created_ts'] == 0 and event_ts > 0: + rec['created_ts'] = event_ts + + logger.debug(f'Fetched {len(self._records)} records from audit events') + + def _get_record_title_from_vault(self, record_uid: str) -> str: + """Try to get record title from vault.""" + if self._vault is None: + return '' + try: + vault_data = self._vault.vault_data + if vault_data: + record = vault_data.get_record(record_uid) + if record: + return record.title or '' + 
except Exception: + pass + return '' + + def _enrich_titles_from_vault(self) -> None: + """Enrich records missing titles from vault data.""" + if self._vault is None: + return + for record_uid, data in self._records.items(): + if not data.get('title'): + title = self._get_record_title_from_vault(record_uid) + if title: + data['title'] = title + + def generate_report(self) -> List[AgingReportEntry]: + """Generate the password aging report.""" + cutoff_ts = self._get_cutoff_timestamp() + target_username = self._get_target_username() + + self._build_user_lookups() + + if target_username and target_username not in self._email_to_user_id: + return [] + + user_ids = None + if target_username: + user_id = self._email_to_user_id.get(target_username) + if user_id: + user_ids = [user_id] + + self._fetch_compliance_data(user_ids) + + if not self._records: + self._fetch_records_from_audit_events() + + self._update_timestamps_from_audit_events() + self._enrich_titles_from_vault() + + report_entries: List[AgingReportEntry] = [] + + for record_uid, data in self._records.items(): + owner_email = data.get('owner_email', '') + + if target_username and owner_email.lower() != target_username: + continue + + if self._config.exclude_deleted and data.get('in_trash'): + continue + + record_sfs = self.get_record_sfs(record_uid) + if self._config.in_shared_folder and not record_sfs: + continue + + created_ts = data.get('created_ts', 0) + pw_changed_ts = data.get('pw_changed_ts', 0) + + if (created_ts and created_ts >= cutoff_ts) or (pw_changed_ts and pw_changed_ts >= cutoff_ts): + continue + + ts = pw_changed_ts or created_ts + change_dt = datetime.datetime.fromtimestamp(ts) if ts else None + created_dt = datetime.datetime.fromtimestamp(created_ts) if created_ts else None + + entry = AgingReportEntry( + record_uid=record_uid, + owner_email=owner_email, + title=data.get('title', ''), + last_changed=change_dt, + record_created=created_dt, + shared=data.get('shared', False), + record_url=f'https://{self._config.server}/vault/#detail/{record_uid}', + shared_folder_uid=record_sfs or None, + in_trash=data.get('in_trash', False) + ) + report_entries.append(entry) + + report_entries.sort(key=self._sort_key) + return report_entries + + @staticmethod + def _sort_key(entry: AgingReportEntry) -> Tuple[int, float]: + """Sort key for report entries by date.""" + if entry.last_changed: + return (0, entry.last_changed.timestamp()) + if entry.record_created: + return (1, entry.record_created.timestamp()) + return (2, 0) + + def cleanup(self, enterprise_id: int) -> None: + """Clean up cache if no_cache option is set.""" + if self._config.no_cache: + self.delete_local_cache(enterprise_id) + + def generate_report_rows(self, include_shared_folder: bool = False) -> Iterable[List[Any]]: + """Generate report rows for tabular output.""" + for entry in self.generate_report(): + row = [entry.owner_email, entry.title, entry.last_changed, entry.shared, entry.record_url] + if include_shared_folder: + row.append(entry.shared_folder_uid or '') + yield row + + @staticmethod + def get_headers(include_shared_folder: bool = False) -> List[str]: + """Get column headers for the report.""" + headers = ['owner', 'title', 'password_changed', 'shared', 'record_url'] + if include_shared_folder: + headers.append('shared_folder_uid') + return headers + + +def parse_period(period_str: str) -> Optional[int]: + """Parse period string (e.g., '3m', '10d', '1y') to days. 
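+ The expected format is an integer immediately followed by a single unit character, for example '90d'.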
+ + Args: + period_str: Period string with format '' where unit is: + - 'd' for days + - 'm' for months (30 days) + - 'y' for years (365 days) + + Returns: + Number of days, or None if parsing fails. + """ + if not period_str or len(period_str.strip()) < 2: + return None + + period_str = period_str.strip().lower() + unit = period_str[-1] + + try: + value = abs(int(period_str[:-1])) + except ValueError: + return None + + multipliers = {'d': 1, 'm': 30, 'y': 365} + multiplier = multipliers.get(unit) + if multiplier is None: + return None + + return value * multiplier + + +def parse_date(date_str: str) -> Optional[datetime.datetime]: + """Parse date string in various formats.""" + formats = ['%Y-%m-%d', '%Y.%m.%d', '%Y/%m/%d', '%m-%d-%Y', '%m.%d.%Y', '%m/%d/%Y'] + for fmt in formats: + try: + return datetime.datetime.strptime(date_str, fmt) + except ValueError: + continue + return None + + +def generate_aging_report( + enterprise_data: enterprise_types.IEnterpriseData, + auth: keeper_auth.KeeperAuth, + period_days: int = DEFAULT_PERIOD_DAYS, + cutoff_date: Optional[datetime.datetime] = None, + username: Optional[str] = None, + exclude_deleted: bool = False, + in_shared_folder: bool = False, + rebuild: bool = False, + server: str = 'keepersecurity.com' +) -> List[AgingReportEntry]: + """Convenience function to generate an aging report.""" + config = AgingReportConfig( + period_days=period_days, + cutoff_date=cutoff_date, + username=username, + exclude_deleted=exclude_deleted, + in_shared_folder=in_shared_folder, + rebuild=rebuild, + server=server + ) + return AgingReportGenerator(enterprise_data, auth, config).generate_report() diff --git a/keepersdk-package/src/keepersdk/enterprise/audit_report.py b/keepersdk-package/src/keepersdk/enterprise/audit_report.py index d8e7ed5d..4612d578 100644 --- a/keepersdk-package/src/keepersdk/enterprise/audit_report.py +++ b/keepersdk-package/src/keepersdk/enterprise/audit_report.py @@ -272,7 +272,6 @@ def execute_audit_report(self) -> Iterable[dict]: SUMMARY_REPORTS = ('hour', 'day', 'week', 'month', 'span') -AGGREGATES = ('occurrences', 'first_created', 'last_created') class SummaryAuditReport(AuditReportCommon): @@ -286,6 +285,7 @@ def __init__(self, auth: keeper_auth.KeeperAuth): @property def summary_type(self) -> str: return self._summary_type + @summary_type.setter def summary_type(self, value: str): if value in SUMMARY_REPORTS: diff --git a/keepersdk-package/src/keepersdk/helpers/config_utils.py b/keepersdk-package/src/keepersdk/helpers/config_utils.py new file mode 100644 index 00000000..5213b328 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/config_utils.py @@ -0,0 +1,20 @@ +from ..proto import pam_pb2 +from ..vault import vault_extensions, vault_online, vault_record +from .. 
import utils, crypto + +def pam_configuration_create_record_v6(vault: vault_online.VaultOnline, record: vault_record.TypedRecord, folder_uid: str): + if not record.record_uid: + record.record_uid = utils.generate_uid() + + if not record.record_key: + record.record_key = utils.generate_aes_key() + + record_data = vault_extensions.extract_typed_record_data(record) + json_data = record.load_record_data(record_data) + + car = pam_pb2.ConfigurationAddRequest() + car.configurationUid = utils.base64_url_decode(record.record_uid) + car.recordKey = crypto.encrypt_aes_v2(record.record_key, vault.keeper_auth.auth_context.data_key) + car.data = crypto.encrypt_aes_v2(json_data, record.record_key) + + vault.keeper_auth.execute_auth_rest('pam/add_configuration_record', car) diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/__init__.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/__init__.py new file mode 100644 index 00000000..a65e1fa0 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/__init__.py @@ -0,0 +1,307 @@ + +import csv +from enum import Enum +import logging +import sys +import time +import os +from pydantic import BaseModel +from typing import Any, Dict, Optional, Tuple, Union + +from . import dag_utils, dag_crypto, exceptions +from .dag_types import SyncQuery +from .__version__ import __version__ as dag_version + +from ...proto import router_pb2, GraphSync_pb2 + +class ConnectionBase: + + ADD_DATA = "/add_data" + SYNC = "/sync" + + TIMEOUT = 30 + + def __init__(self, + is_device: bool, + logger: Optional[logging.Logger] = None, + log_transactions: Optional[bool] = None, + log_transactions_dir: Optional[str] = None, + use_read_protobuf: bool = False, + use_write_protobuf: bool = False): + + # device is a gateway device if is_device is False then we use user authentication flow + self.is_device = is_device + + if logger is None: + logger = logging.getLogger() + self.logger = logger + + # Debug tool; log transaction to the file + if log_transactions is not None: + self.log_transactions = dag_utils.value_to_boolean(log_transactions) + else: + self.log_transactions: bool = dag_utils.value_to_boolean(os.environ.get("GS_LOG_TRANS", False)) + + self.log_transactions_dir = os.environ.get("GS_LOG_TRANS_DIR", log_transactions_dir) + if self.log_transactions_dir is None: + self.log_transactions_dir = "." 
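+ # With no directory configured, transaction log files are written to the current working directory.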
+ + if self.log_transactions is True: + self.logger.info("keeper-dag transaction logging is ENABLED; " + f"write directory at {self.log_transactions_dir}") + + self.use_read_protobuf = use_read_protobuf + self.use_write_protobuf = use_write_protobuf + + # This should stay none for KSM + self.transmission_key = None + + def close(self): + if hasattr(self, "logger"): + self.logger = None + del self.logger + + def __del__(self): + self.close() + + def log_transaction_path(self, file: str): + return os.path.join(self.log_transactions_dir, f"graph_{file}.csv") + + @staticmethod + def get_record_uid(record: object) -> str: + pass + + @staticmethod + def get_key_bytes(record: object) -> bytes: + pass + + @staticmethod + def get_encrypted_payload_data(encrypted_payload_data: bytes) -> bytes: + try: + router_response = router_pb2.RouterResponse() + router_response.ParseFromString(encrypted_payload_data) + return router_response.encryptedPayload + except Exception as err: + raise Exception(f"Could not parse router response: {err}") + + def rest_call_to_router(self, + http_method: str, + endpoint: str, + agent: str, + payload: Optional[Union[str, bytes]] = None, + retry: int = 5, + retry_wait: float = 10, + throttle_inc_factor: float = 1.5, + timeout: Optional[int] = None, + headers: Optional[Dict] = None) -> Optional[bytes]: + return b"" + + def _endpoint(self, action: str, endpoint: Optional[str] = None) -> str: + + """ + Build the endpoint on the remote site. + + This method will attempt to fix slashes. + + :param action: + :param endpoint: + :return: + """ + + # Make sure endpoint is /path/to/endpoint; starting / and no ending / + if endpoint is not None and endpoint != "": + if isinstance(endpoint, Enum): + endpoint = endpoint.value + + while endpoint.startswith("/"): + endpoint = endpoint[1:] + while endpoint.endswith("/"): + endpoint = endpoint[:-1] + endpoint = "/" + endpoint + else: + endpoint = "" + + while action.startswith("/"): + action = action[1:] + while action.endswith("/"): + action = action[:-1] + action = "/" + action + + base = "/api/device" + if not self.is_device: + base = "/api/user" + + return base + endpoint + action + + def write_transaction_log(self, + agent: str, + endpoint: str, + graph_id: Optional[int] = None, + request: Optional[Any] = None, + response: Optional[Any] = None, + error: Optional[str] = None): + # If log transaction is True, we want to append to the log file. 
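+ # Each appended CSV row captures the timestamp, the invoking script, endpoint, agent, request, response, and error.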
+ + if self.log_transactions is True: + + file_name = graph_id + if file_name is None: + file_name = endpoint.replace("/", "_") + + timestamp = time.time() + + if isinstance(request, BaseModel): + request = request.model_dump_json() + elif hasattr(request, "SerializeToString"): + request = request.SerializeToString() + + if isinstance(response, BaseModel): + response = request.model_dump_json() + elif hasattr(response, "SerializeToString"): + response = request.SerializeToString() + + self.logger.info(f"TRANSACTION TIMESTAMP: {timestamp}") + filename = self.log_transaction_path(str(file_name)) + self.logger.debug(f"write to {filename}") + with open(filename, mode='a', newline='') as file: + self.logger.debug("write add_data to transaction log") + writer = csv.writer(file) + writer.writerow([ + timestamp, + sys.argv[0], + endpoint, + agent, + request, + response, + error + ]) + file.close() + + def payload_and_headers(self, payload: Any) -> Tuple[Union[str, bytes], Dict]: + + headers = {} + if isinstance(payload, BaseModel): + self.logger.debug("payload is pydantic") + payload = payload.model_dump_json() + elif hasattr(payload, "SerializeToString"): + self.logger.debug("payload is protobuf") + headers = {'Content-Type': 'application/octet-stream'} + payload = dag_crypto.encrypt_aes(payload.SerializeToString(), self.transmission_key) + else: + raise Exception("Cannot determine if the model is pydantic or protobuf.") + + return payload, headers + + def sync(self, + sync_query: Union[SyncQuery, GraphSync_pb2.GraphSyncQuery], + graph_id: Optional[int] = None, + endpoint: Optional[str] = None, + agent: Optional[str] = None) -> bytes: + + if agent is None: + f"keeper-dag/{dag_version.__version__}" + + endpoint = self._endpoint(ConnectionBase.SYNC, endpoint) + self.logger.debug(f"endpoint {endpoint}") + + try: + sync_query, headers = self.payload_and_headers(sync_query) + payload = self.rest_call_to_router(http_method="POST", + endpoint=endpoint, + agent=agent, + headers=headers, + payload=sync_query) + + if self.use_read_protobuf: + try: + self.logger.debug(f"decrypt payload with transmission key {dag_utils.kotlin_bytes(self.transmission_key)}") + payload = self.get_encrypted_payload_data(payload) + payload = dag_crypto.decrypt_aes(payload, self.transmission_key) + except Exception as err: + self.logger.error(f"Could not decrypt protobuf graph sync response: {type(err)}, {err}") + + self.write_transaction_log( + graph_id=graph_id, + request=sync_query, + response=payload, + agent=agent, + endpoint=endpoint, + error=None + ) + + return payload + + except exceptions.DAGConnectionException as err: + + self.write_transaction_log( + graph_id=graph_id, + request=sync_query, + response=None, + agent=agent, + endpoint=endpoint, + error=str(err) + ) + raise err + except Exception as err: + self.write_transaction_log( + graph_id=graph_id, + request=sync_query, + response=None, + agent=agent, + endpoint=endpoint, + error=str(err) + ) + raise exceptions.DAGException(f"Could not load the DAG structure: {err}") + + def debug_dump(self) -> str: + return "Connection does not allow debug dump." 
+ + def add_data(self, + payload: Union[dag_types.DataPayload, GraphSync_pb2.GraphSyncAddDataRequest], + graph_id: Optional[int] = None, + endpoint: Optional[str] = None, + use_protobuf: bool = False, + agent: Optional[str] = None): + + if agent is None: + f"keeper-dag/{dag_version.__version__}" + + endpoint = self._endpoint(ConnectionBase.ADD_DATA, endpoint) + self.logger.debug(f"endpoint {endpoint}") + + try: + payload, headers = self.payload_and_headers(payload) + self.rest_call_to_router(http_method="POST", + endpoint=endpoint, + payload=payload, + headers=headers, + agent=agent) + + self.write_transaction_log( + graph_id=graph_id, + request=payload, + response=None, + agent=agent, + endpoint=endpoint, + error=None + ) + except exceptions.DAGConnectionException as err: + self.write_transaction_log( + graph_id=graph_id, + request=payload, + response=None, + agent=agent, + endpoint=endpoint, + error=str(err) + ) + raise err + except Exception as err: + self.write_transaction_log( + graph_id=graph_id, + request=payload, + response=None, + agent=agent, + endpoint=endpoint, + error=str(err) + ) + raise exceptions.DAGException(f"Could not create a new DAG structure: {err}") diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/__version__.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/__version__.py new file mode 100644 index 00000000..12ce4098 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/__version__.py @@ -0,0 +1 @@ +__version__ = '1.1.0' # pragma: no cover diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/connection.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/connection.py new file mode 100644 index 00000000..c6bdf812 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/connection.py @@ -0,0 +1,205 @@ +import logging +import os + +import time +from typing import Any, Dict, Optional, Tuple, Union + +import requests +from . import ConnectionBase, dag_utils, exceptions + +from keepersdk.vault import vault_online, vault_record +from keepersdk import crypto, utils +from keepersdk.authentication import endpoint + +class Connection(ConnectionBase): + + def __init__(self, + vault: vault_online.VaultOnline, + verify_ssl: bool = True, + is_ws: bool = False, + logger: Optional[logging.Logger] = None, + log_transactions: Optional[bool] = False, + log_transactions_dir: Optional[str] = None, + use_read_protobuf: bool = False, + use_write_protobuf: bool = False, + **kwargs): + + # Commander uses /api/user; hence is_device=False + super().__init__(is_device=False, + logger=logger, + log_transactions=log_transactions, + log_transactions_dir=log_transactions_dir, + use_read_protobuf=use_read_protobuf, + use_write_protobuf=use_write_protobuf) + + self.vault = vault + self.verify_ssl = dag_utils.value_to_boolean(os.environ.get("VERIFY_SSL", verify_ssl)) + self.is_ws = is_ws + + # Deprecated; setting this will override the per-transaction values. + self.transmission_key = kwargs.get("transmission_key") + self.dep_encrypted_transmission_key = kwargs.get("encrypted_transmission_key") + self.dep_encrypted_session_token = kwargs.get("encrypted_session_token") + + @staticmethod + def get_record_uid(record: vault_record.KeeperRecord) -> str: + return record.record_uid + + @staticmethod + def get_key_bytes(record: vault_record.KeeperRecord) -> bytes: + return record.record_key + + @property + def hostname(self) -> str: + # The host is connect.keepersecurity.com, connect.dev.keepersecurity.com, etc. 
Append "connect" in front + # of host used for Commander. + configured_host = f'connect.{self.params.config.get("server")}' + + # In GovCloud environments, the router service is not under the govcloud subdomain + if 'govcloud.' in configured_host: + # "connect.govcloud.keepersecurity.com" -> "connect.keepersecurity.com" + configured_host = configured_host.replace('govcloud.', '') + + return os.environ.get("ROUTER_HOST", configured_host) + + @property + def dag_server_url(self) -> str: + + # Allow override of the URL. If not set, get the hostname from the config. + hostname = os.environ.get("KROUTER_URL", self.hostname) + if hostname.startswith('ws') or hostname.startswith('http'): + return hostname + + use_ssl = dag_utils.value_to_boolean(os.environ.get("USE_SSL", True)) + if self.is_ws: + prot_pref = 'ws' + else: + prot_pref = 'http' + if use_ssl is True: + prot_pref += "s" + + return f'{prot_pref}://{hostname}' + + # deprecated + def get_keeper_tokens(self): + self.transmission_key = utils.generate_aes_key() + server_public_key = endpoint.SERVER_PUBLIC_KEYS[self.vault.keeper_auth.keeper_endpoint.server_key_id] + + if self.vault.keeper_auth.keeper_endpoint.server_key_id < 7: + self.dep_encrypted_transmission_key = crypto.encrypt_rsa(self.transmission_key, server_public_key) + else: + self.dep_encrypted_transmission_key = crypto.encrypt_ec(self.transmission_key, server_public_key) + self.dep_encrypted_session_token = crypto.encrypt_aes_v2( + utils.base64_url_decode(self.vault.keeper_auth.auth_context.session_token), self.transmission_key) + + def payload_and_headers(self, payload: Any) -> Tuple[Union[str, bytes], Dict]: + + # If the dep_encrypted_transmission_key, use the set value over the generated ones. + if self.dep_encrypted_transmission_key is not None: + encrypted_transmission_key = self.dep_encrypted_transmission_key + encrypted_session_token = self.dep_encrypted_session_token + + # This is what we want to use; it's different for each call. + else: + # Create a new transmission key + self.transmission_key = utils.generate_aes_key() + self.logger.debug(f"transmission key is {self.transmission_key}") + # self.params.rest_context.transmission_key = self.transmission_key + server_public_key = endpoint.SERVER_PUBLIC_KEYS[self.vault.keeper_auth.keeper_endpoint.server_key_id] + + if self.vault.keeper_auth.keeper_endpoint.server_key_id < 7: + encrypted_transmission_key = crypto.encrypt_rsa(self.transmission_key, server_public_key) + else: + encrypted_transmission_key = crypto.encrypt_ec(self.transmission_key, server_public_key) + encrypted_session_token = crypto.encrypt_aes_v2( + utils.base64_url_decode(self.vault.keeper_auth.auth_context.session_token), self.transmission_key) + + # We need the transmission_key for protobuf sync since it returns values encrypted with the transmission_key. + if self.transmission_key is None: + raise exceptions.DAGConnectionException("The transmission key has not been set. If setting encrypted_transmission_key " + "and encrypted_session_token, also set transmission_key to 32 bytes. 
" + "Setting the encrypted_transmission_key and encrypted_session_token is " + "deprecated.") + + payload, headers = super().payload_and_headers(payload) + + headers["TransmissionKey"] = utils.base64_url_encode(encrypted_transmission_key) + headers["Authorization"] = f'KeeperUser {utils.base64_url_encode(encrypted_session_token)}' + + return payload, headers + + def rest_call_to_router(self, + http_method: str, + endpoint: str, + agent: str, + payload: Optional[Union[str, bytes]] = None, + retry: int = 5, + retry_wait: float = 10, + throttle_inc_factor: float = 1.5, + timeout: Optional[int] = None, + headers: Optional[Dict] = None) -> Optional[bytes]: + + if timeout is None or timeout == 0: + timeout = Connection.TIMEOUT + + if isinstance(payload, str): + payload = payload.encode() + + if not endpoint.startswith("/"): + endpoint = "/" + endpoint + + url = self.dag_server_url + endpoint + + if headers is None: + headers = {} + + attempt = 0 + while True: + try: + attempt += 1 + self.logger.debug(f"graph web service call to {url} [{attempt}/{retry}]") + response = requests.request( + method=http_method, + url=url, + verify=self.verify_ssl, + headers={ + **headers, + 'User-Agent': agent + }, + data=payload, + timeout=timeout + ) + self.logger.debug(f"response status: {response.status_code}") + response.raise_for_status() + return response.content + + except requests.exceptions.HTTPError as http_err: + + msg = http_err.response.reason + try: + content = http_err.response.content.decode() + if content is not None and content != "": + msg = "; " + content + except (Exception,): + pass + + err_msg = f"{http_err.response.status_code}, {msg}" + + if http_err.response.status_code == 429: + attempt -= 1 + retry_wait *= throttle_inc_factor + self.logger.warning("the connection to the graph service is being throttled, " + f"increasing the delay between retry: {retry_wait} seconds.") + + except Exception as err: + err_msg = str(err) + + self.logger.info(f"call to graph web service {url} had a problem: {err_msg}") + if attempt >= retry: + self.logger.error(f"call to graph web service {url}, after {retry} " + f"attempts, failed!: {err_msg}") + raise exceptions.DAGConnectionException(f"Call to graph web service {url}, after {retry} " + f"attempts, failed!: {err_msg}") + + self.logger.info(f"will retry call after {retry_wait} seconds.") + time.sleep(retry_wait) diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag.py new file mode 100644 index 00000000..ffa5f5aa --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag.py @@ -0,0 +1,1441 @@ + +from enum import Enum +import importlib +import json +import logging +import os +import sys +import traceback +from typing import Any, List, Optional, Tuple, Union + +from . import dag_utils, dag_crypto +from .dag_types import EdgeType, Ref, RefType, ENDPOINT_TO_GRAPH_ID_MAP, DAGData +from .dag_vertex import DAGVertex +from .struct.protobuf import DataStruct as ProtobufDataStruct +from .struct.default import DataStruct as DefaultDataStruct +from .exceptions import (DAGPathException, DAGDataException, DAGKeyException, DAGCorruptException, + DAGVertexException, DAGConfirmException, DAGVertexAlreadyExistsException, DAGEdgeException) +from .connection import ConnectionBase +from .__version__ import __version__ as dag_version +from ... import utils + +QueryValue = Union[list, dict, str, float, int, bool] + +class DAG: + + # Debug level. Increase to get finer debug messages. 
+ DEBUG_LEVEL = 0 + + UID_KEY_BYTES_SIZE = 16 + UID_KEY_STR_SIZE = 22 + + # For the dot output, enum to text. + EDGE_LABEL = { + EdgeType.DATA: "DATA", + EdgeType.KEY: "KEY", + EdgeType.LINK: "LINK", + EdgeType.ACL: "ACL", + EdgeType.DELETION: "DELETION", + } + + def __init__(self, + conn: ConnectionBase, + record: Optional[object] = None, + key_bytes: Optional[bytes] = None, + name: Optional[str] = None, + read_endpoint: Optional[Union[str, Enum]] = None, + write_endpoint: Optional[Union[str, Enum]] = None, + graph_id: Optional[Union[int, Enum]] = None, + auto_save: bool = False, + history_level: int = 0, + logger: Optional[Any] = None, + debug_level: int = 0, + is_dev: bool = False, + vertex_type: RefType = RefType.PAM_NETWORK, + decrypt: bool = True, + fail_on_corrupt: bool = True, + data_requires_encryption: bool = False, + log_prefix: str = "GraphSync", + save_batch_count: Optional[int] = None, + agent: Optional[str] = None, + dedup_edges: bool = False): + + """ + Create a GraphSync instance. + + :param conn: Connection instance + :param record: If set, the key bytes will use the key bytes in the record. Overrides key_bytes. + :param key_bytes: If set, these key bytes will be used. + :param name: Optional name for the graph. + :param read_endpoint: Endpoint for reading from graph. Use this over `graph_id`. (i.e. graph-sync/pam ) + :param write_endpoint: Endpoint for writing to graph. Use this over `graph_id`. (i.e. graph-sync/pam ) + :param graph_id: Graph ID sets which graph to load for the graph. `endpoint` replaces this, but code is + backwards compatiable. + :param auto_save: Automatically save when modifications are performed. Default is False. + :param history_level: How much edge history to keep in memory. Default is 0, no history. + :param logger: Python logger instance to use for logging. + :param debug_level: Debug level; the higher the number will result in more debug information. + :param is_dev: Is the code running in a development environment? + :param vertex_type: The default vertex/ref type for the root vertex, if auto creating. + :param decrypt: Decrypt the graph; Default is TRUE + :param fail_on_corrupt: If unable to decrypt encrypted data, fail out. + :param data_requires_encryption: Data edges are already encrypted. Default is False. + :param log_prefix: Text prepended to the log messages. Handy if dealing with multiple graphs. + :param save_batch_count: The number of edges to save at one time. + :param agent: User Agent to send with web service requests. + :param dedup_edges: Remove modified edges if the same edge added before save. + :return: Instance of GraphSync + """ + + if logger is None: + logger = logging.getLogger() + self.logger = logger + if debug_level is None: + debug_level = int(os.environ.get("GS_DEBUG_LEVEL", os.environ.get("DAG_DEBUG_LEVEL", 0))) + + # Prevent duplicate edges to be added. + # The goal is to prevent unneeded edges. + # If warning is turned on, log dup and stacktrace. + self.dedup_edge = dag_utils.value_to_boolean(os.environ.get("GS_DEDUP_EDGES", dedup_edges)) + self.dedup_edge_warning = dag_utils.value_to_boolean(os.environ.get("GS_DEDUP_EDGES_WARN", False)) + + if self.dedup_edge and auto_save: + raise Exception("Cannot run dedup_edge and auto_save at the same time. 
The dedup_edge feature only works " + "in bulk saves.") + + self.debug_level = debug_level + self.log_prefix = log_prefix + + if save_batch_count is None or save_batch_count <= 0: + save_batch_count = 0 + self.save_batch_count = save_batch_count + + self.vertex_type = vertex_type + + self.data_requires_encryption = data_requires_encryption + self.decrypt = decrypt + self.fail_on_corrupt = fail_on_corrupt + + gs_is_dev = os.environ.get("GS_IS_DEV", os.environ.get("DAG_IS_DEV")) + if gs_is_dev is not None: + is_dev = dag_utils.value_to_boolean(gs_is_dev) + self.is_dev = is_dev + + # If the record is passed in, use the UID and key bytes from the record. + self.uid = None + if record is not None: + self.uid = conn.get_record_uid(record) + key_bytes = conn.get_key_bytes(record) + + self.key = key_bytes + + if key_bytes is None: + raise ValueError("Either the record or the key_bytes needs to be passed.") + + # If the UID is blank, use the key bytes to generate a UID + if self.uid is None: + self.uid = dag_crypto.generate_uid_str(key_bytes[:16]) + + if graph_id is None and (read_endpoint is None or write_endpoint is None): + raise ValueError("Either graph_id or read/write endpoints needs to be set.") + + # graph_id and endpoint determine how/where the graph is stored on the GraphSync service. + if graph_id is not None: + if isinstance(read_endpoint, Enum): + graph_id = ENDPOINT_TO_GRAPH_ID_MAP.get(read_endpoint.value) + self.graph_id = graph_id + + if read_endpoint is not None: + if isinstance(read_endpoint, Enum): + read_endpoint = read_endpoint.value + if write_endpoint is not None: + if isinstance(write_endpoint, Enum): + write_endpoint = write_endpoint.value + + self.read_endpoint = read_endpoint + self.write_endpoint = write_endpoint + + if name is None: + name = f"{self.log_prefix} ROOT" + self.name = name + + # The order of the vertices is important. + # The order creates the history. + # The web service will order edge by their edge_id + # Store in and array. + # The lookup table to make UID to DAGVertex easier. + # The integer is the index into the array. + self._vertices = [] + self._uid_lookup = {} + + # This is like the batch + self.origin_ref_value = dag_crypto.generate_uid_bytes(16) + self.origin_uid = dag_crypto.generate_uid_str(uid_bytes=self.origin_ref_value) + + # If True, any addition or changes will automatically be saved. + self.auto_save = auto_save + + # To auto save, both allow_auto_save and auto_save needs to be True. + # If the graph has not been saved before and the root vertex has not been connected, + # we want to disable auto save. + self._allow_auto_save = False + + # For big changes, we need a confirmation to save. + self.need_save_confirm = False + + # The last sync point after save. + self.last_sync_point = 0 + + # Amount of history to keep. + # The default is 0, which will keep all history. + # Setting to 1 will only keep the latest edges. + # Settings to 2 will keep the latest and prior edges. + # And so on. + self.history_level = history_level + + # If data was corrupt in the graph, the vertex UID will appear in this list. 
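+        # (For example, a vertex whose KEY edge cannot be decrypted in _decrypt_keychain, or whose DATA
+        # edge cannot be decrypted while data_requires_encryption is set, is recorded here.)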
+        self.corrupt_uids = []
+
+        self.conn = conn
+
+        self.read_struct_obj: Union[ProtobufDataStruct, DefaultDataStruct] = ProtobufDataStruct() \
+            if conn.use_read_protobuf else DefaultDataStruct()
+        self.write_struct_obj: Union[ProtobufDataStruct, DefaultDataStruct] = ProtobufDataStruct() \
+            if conn.use_write_protobuf else DefaultDataStruct()
+
+        # dag_version is imported as the version string itself, so use it directly.
+        self.agent = f"keeper-dag/{dag_version}"
+        if agent is not None:
+            self.agent += "; " + agent
+
+        self.debug(f"save batch count is set to {self.save_batch_count}")
+        if self.is_dev is True:
+            self.debug("GraphSync is running in a development environment, vertex names will be included.")
+        self.debug(f"edge de-dup is {self.dedup_edge}", level=1)
+        self.debug(f"edge de-dup debug warning {self.dedup_edge_warning}", level=1)
+        self.debug(f"{self.log_prefix} key {self.key}", level=1)
+        self.debug(f"{self.log_prefix} UID {self.uid}", level=1)
+        self.debug(f"{self.log_prefix} UID HEX {dag_crypto.urlsafe_str_to_bytes(self.uid).hex()}", level=1)
+
+    def __del__(self):
+        self.cleanup()
+
+    def cleanup(self):
+        """
+        Explicitly clean up the DAG and break circular references.
+
+        This method allows users to manually trigger cleanup before the object
+        goes out of scope. This is useful in scenarios where you want to ensure
+        immediate memory release, such as:
+        - High-frequency DAG creation/destruction
+        - Long-running processes
+        - Memory-constrained environments
+
+        After calling this method, the DAG object should not be used.
+
+        Example:
+            dag = DAG(conn=conn, key_bytes=key)
+            # ... use the dag ...
+            dag.cleanup()  # Explicitly clean up
+            del dag
+        """
+
+        try:
+            # Safely get the root vertex without creating a new one
+            if hasattr(self, '_vertices') and hasattr(self, 'uid') and hasattr(self, '_uid_lookup'):
+                if len(self._vertices) > 0 and self.uid in self._uid_lookup:
+                    idx = self._uid_lookup[self.uid]
+                    if idx < len(self._vertices):
+                        root = self._vertices[idx]
+                        if hasattr(root, 'clean_edges'):
+                            root.clean_edges()
+        except (Exception,):
+            pass
+        finally:
+            # Always attempt to clear these collections, even if clean_edges() fails
+            try:
+                if hasattr(self, '_vertices'):
+                    self._vertices.clear()
+                if hasattr(self, '_uid_lookup'):
+                    self._uid_lookup.clear()
+                if hasattr(self, 'corrupt_uids'):
+                    self.corrupt_uids.clear()
+            except (Exception,):
+                pass
+
+        # Clear all collections to break circular references
+        self.read_struct_obj = None
+        del self.read_struct_obj
+        self.write_struct_obj = None
+        del self.write_struct_obj
+        self.conn = None
+        del self.conn
+
+    def debug(self, msg: str, level: int = 0):
+        """
+        Debug with granularity level.
+
+        If the debug level is greater than or equal to the level of the message, the message will be displayed.
+ + :param msg: Text debug message + :param level: Debug level of message + :return: + """ + + if self.debug_level >= level: + + msg = f"{self.log_prefix}: {msg}" + + if self.logger is not None: + self.logger.debug(msg) + else: + logging.debug(msg) + + def debug_stacktrace(self): + exc = sys.exc_info()[0] + # the last one would be full_stack() + stack = traceback.extract_stack()[:-1] + if exc is not None: + del stack[-1] + trc = 'Traceback (most recent call last):\n' + msg = trc + ''.join(traceback.format_list(stack)) + if exc is not None: + msg += ' ' + traceback.format_exc().lstrip(trc) + self.debug(msg) + + def __str__(self): + ret = f"GraphSync {self.uid}\n" + ret += f" python instance id: {id(self)}\n" + ret += f" name: {self.name}\n" + ret += f" key: {self.key}\n" + ret += f" vertices:\n" + for v in self.all_vertices: + ret += f" * {v.uid}, Keys: {v.keychain}, Active: {v.active}\n" + for e in v.edges: + if e.edge_type == EdgeType.DATA: + ret += f" + has a DATA edge" + if e.content is not None: + ret += ", has content" + else: + ret += f" + belongs to {e.head_uid}, {DAG.EDGE_LABEL.get(e.edge_type)}, {e.content}" + ret += "\n" + + return ret + + @property + def is_corrupt(self): + return len(self.corrupt_uids) > 0 + + @property + def allow_auto_save(self) -> bool: + """ + Return the flag indicating if auto save is allowed. + :return: + """ + + return self._allow_auto_save + + @allow_auto_save.setter + def allow_auto_save(self, value: bool): + """ + Set the ability to auto save. + :param value: True enables, False disables. + :return: + """ + + if value: + self.debug("ability to auto save has been ENABLED", level=2) + else: + self.debug("ability to auto save has been DISABLED", level=2) + + self._allow_auto_save = value + + @property + def origin_ref(self) -> Ref: + + """ + Return an instance of the origin reference for adding data + :return: + """ + + return Ref( + type=RefType.DEVICE, + value=self.origin_uid, + name=self.name if self.is_dev is True else None + ) + + @property + def has_graph(self) -> bool: + """ + Do we have any graph items? + + :return: True if there are vertices. False if no vertices. + """ + + return len(self._vertices) > 0 + + @property + def vertices(self) -> List[DAGVertex]: + """ + Get all active vertices + + :return: List of DAGVertex instance + """ + + return [ + vertex + for vertex in self._vertices + if vertex.active is True + ] + + @property + def all_vertices(self) -> List[DAGVertex]: + """ + Get all vertices + :return: List of DAGVertex instance + """ + + return self._vertices + + def get_vertex(self, key) -> Optional[DAGVertex]: + + """ + Get a single vertex. + + The key can be either a UID, path or name. + + The UID is most reliable since there can only be one per graph. + + The path is second reliable if it is set by the user. + It will find an edge with the path, the vertex that is the edge's tail is returned. + There is no unique constraint for the path. + You can have duplicates. + + The name is third, and not reliable. + The name only exists when the graph is created. + If loaded, the name will be None. + + :param key: A UID, path item, or name of a vertex. + :return: DAGVertex instance, if it exists. + """ + + if key is None: + return None + + # Is the key a UID? If so, return the vertex from the lookup. + if key in self._uid_lookup: + index = self._uid_lookup[key] + return self._vertices[index] + + # Is the key a path? + # We also want to include any deleted edges. 
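+        # A path lookup returns the tail vertex of any edge whose 'path' value matches; if more than one
+        # vertex uses the same path, the DAGPathException below is raised.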
+        vertices = self.get_vertices_by_path_value(key, inc_deleted=True)
+        if len(vertices) > 0:
+            if len(vertices) > 1:
+                raise DAGPathException("Cannot get vertex using the path. Found multiple vertices that use the path.")
+            return vertices[0]
+
+        # Is the key a name? This is a last resort.
+        # Search every vertex, since the path lookup above returned nothing.
+        for vertex in self.all_vertices:
+            if vertex.name == key:
+                return vertex
+
+        return None
+
+    @property
+    def get_root(self) -> Optional[DAGVertex]:
+        """
+        Get the root vertex.
+
+        If the root vertex does not exist, it will be created using the graph's default vertex type
+        (PAM_NETWORK unless overridden).
+
+        :return:
+        """
+        root = self.get_vertex(self.uid)
+        if root is None:
+            root = self.add_vertex(uid=self.uid, name=self.name, vertex_type=self.vertex_type)
+        return root
+
+    def vertex_exists(self, key: str) -> bool:
+        """
+        Check if a vertex identified by the key exists.
+        :param key: UID, path, or name
+        :return:
+        """
+
+        return self.get_vertex(key) is not None
+
+    def get_vertices_by_path_value(self, path: str, inc_deleted: bool = False) -> List[DAGVertex]:
+        """
+        Find all vertices that have an edge that matches the path.
+        :param path: A string path value. This is a path to walk, just the value.
+        :param inc_deleted: Include deleted edges.
+        :return: List of DAGVertex
+        """
+        results = []
+        if inc_deleted:
+            vertices = self.all_vertices
+        else:
+            vertices = self.vertices
+
+        for vertex in vertices:
+            for edge in vertex.edges:
+                if edge.path == path:
+                    results.append(vertex)
+        return results
+
+    def _sync(self, sync_point: int = 0) -> Tuple[List[DAGData], int]:
+
+        # The web service returns up to 500 items per page; if more remain, the 'hasMore' flag is set to True.
+        has_more = True
+
+        # Make the web service calls to get all the data.
+        all_data = []
+        while has_more:
+            # Load a page worth of items
+
+            sync_query = self.read_struct_obj.sync_query(stream_id=self.uid,
+                                                         sync_point=sync_point,
+                                                         graph_id=self.graph_id)
+
+            results = self.read_struct_obj.get_sync_result(
+                self.conn.sync(
+                    sync_query=sync_query,
+                    graph_id=self.graph_id,
+                    endpoint=self.read_endpoint,
+                    agent=self.agent
+                ))
+
+            if results.syncPoint == 0:
+                return all_data, 0
+
+            all_data += results.data
+
+            # The server will tell us if there is more data to get.
+            has_more = results.hasMore
+
+            # The sync_point will indicate where we need to start the sync from. Think syncPoint > value.
+            sync_point = results.syncPoint
+
+        return all_data, sync_point
+
+    def _load(self, sync_point: int = 0):
+
+        """
+        Load the DAG.
+
+        This will clear the existing graph.
+        It will make web service calls to get the fresh graph, which will return a list of edges.
+        With the list of edges, it will create vertices and connect them with the edges.
+        The content of the edges will remain encrypted. The 'encrypted' flag is set to True.
+        We need the entire graph structure before decrypting.
+
+        We don't have to worry about keys at this point. We are just trying to get structure
+        and content in the right place. Nothing is decrypted here.
+
+        :param sync_point: Where to load from.
+        """
+
+        # Clear the existing vertices.
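+        # (Both the vertex list and the UID-to-index lookup are rebuilt from the freshly synced edges.)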
+ self._vertices = [] # type: List[DAGVertex] + self._uid_lookup = {} # type: dict[str, int] + + self.debug("# SYNC THE GRAPH ##################################################################", level=1) + + # Make the web service call to set all the data + all_data, sync_point = self._sync(sync_point=sync_point) + + self.debug(" PROCESS the non-DATA edges", level=2) + + # Process the non-DATA edges + for data in all_data: + + # Skip all the DATA edge + edge_type = EdgeType.find_enum(data.type) + if edge_type == EdgeType.DATA: + continue + + # The ref the tail. It connects to stored in the vertex. + tail_uid = data.ref.value + + # The parentRef is the head. It's the arrowhead on the edge. For DATA edges, it will be None. + head_uid = None + if data.parentRef is not None: + head_uid = data.parentRef.value + + self.debug(f" * edge {edge_type}, tail {tail_uid} to head {head_uid}", level=3) + + # We want to store this edge in the Vertex with the same value/UID as the ref. + if not self.vertex_exists(tail_uid): + self.debug(f" * tail vertex {tail_uid} does not exists. create.", level=3) + self.add_vertex( + uid=tail_uid, + name=data.ref.name, + + # This will be 0/GENERAL right now. We do the lookup just in case things will change in the + # future. + vertex_type=RefType.find_enum(data.ref.type) + ) + + # Get the tail vertex. + tail = self.get_vertex(tail_uid) + + # This most likely is a DELETION edge of a DATA edge. + # Set the head to be the same as the tail. + if head_uid is None: + head_uid = tail_uid + + # If the head vertex doesn't exist, we need to create. + if not self.vertex_exists(head_uid): + self.debug(f" * head vertex {head_uid} does not exists. create.", level=3) + self.add_vertex( + uid=head_uid, + name=data.parentRef.name, + vertex_type=RefType.GENERAL + ) + # Get the head vertex, which will exist now. + head = self.get_vertex(head_uid) + self.debug(f" * tail {tail_uid} belongs to {head_uid}, " + f"edge type {edge_type}", level=3) + + if edge_type == EdgeType.DELETION: + tail.disconnect_from(head) + else: + content = data.content + if content is not None: + if data.content_is_base64: + content = utils.base64_url_decode(content) + + # Connect this vertex to the head vertex. It belongs to that head vertex. + tail.belongs_to( + vertex=head, + edge_type=edge_type, + content=content, + # ACL and LINK edges are not encrypted. + is_encrypted=False, + path=data.path, + modified=False, + from_load=True + ) + + self.debug("", level=2) + self.debug(" PROCESS the DATA edges", level=2) + + # Process the DATA edges + # We don't have to worry about vertex creation since they will all exist. + for data in all_data: + + # Only process the data edges. + edge_type = EdgeType.find_enum(data.type) + if edge_type != EdgeType.DATA: + continue + + # Get the tail vertex. + tail_uid = data.ref.value + # We want to store this edge in the Vertex with the same value/UID as the ref. + if not self.vertex_exists(tail_uid): + self.debug(f" * tail vertex {tail_uid} does not exists. create.", level=3) + self.add_vertex( + uid=tail_uid, + name=data.ref.name, + + # This will be 0/GENERAL right now. We do the lookup just in case things will change in the + # future. 
+ vertex_type=RefType.find_enum(data.ref.type) + ) + tail = self.get_vertex(tail_uid) + + content = data.content + if content is not None: + if data.content_is_base64: + content = utils.base64_url_decode(content) + + self.debug(f" * DATA edge belongs to {tail.uid}", level=3) + tail.add_data( + content=content, + # Assume DATA is encrypted; it might not be but, we will handle that later. + is_encrypted=True, + path=data.path, + modified=False, + from_load=True, + ) + + self.debug("", level=1) + + return sync_point + + def _mark_deletion(self): + + """ + Mark vertices as deleted. + + Check each vertex to see if there is any non-DELETION edge connecting to another vertex. + If there are no edges, then the vertex is flagged at deleted. + + This is done to prevent the edges from being connected to a deleted vertex. + Also, to display deleted vertex in the DOT graph. + :return: + """ + + self.debug(" CHECK dag vertices to see if they are active", level=1) + for vertex in self.all_vertices: + + self.debug(f"check vertex {vertex.uid}", level=3) + found_edge_to_another_vertex = False + for edge in vertex.edges: + # Skip the DELETION and DATA edges. + if edge.edge_type == EdgeType.DELETION or edge.edge_type == EdgeType.DATA: + continue + + # Check if this edge has a matching DELETION edge. + # If it does not, this vertex cannot be deleted. + if not edge.is_deleted: + found_edge_to_another_vertex = True + break + + # If the vertex belongs to no vertex, and it not the root, then flag it for deletion. + if found_edge_to_another_vertex is False and vertex.uid != self.uid: + self.debug(f" * vertex is deleted", level=3) + vertex.active = False + + self.debug("", level=1) + + def _decrypt_keychain(self): + + """ + Decrypt KEY/ACL edges + + Part one is to decrypt the KEY and ACL edges. + To decrypt the edge, we need to walk up the edges until we can no longer. + If we get the point where we can't walk up any farther, we need to use the record key bytes. + While walking up, if we get to a keychain that has been decrypted, we return that keychain. + As we walk back, we can decrypt any keychain that is still encrypted. + The decrypt keychain is set in the vertex. + """ + + self.debug(" DECRYPT the dag KEY edges", level=1) + + def _get_keychain(v): + self.debug(f" * looking at {v.uid}", level=3) + + # If the vertex has a decrypted key, then return it. + if v.has_decrypted_keys is True: + self.debug(" found a decrypted keychain on vertex", level=3) + return v.keychain + + # Else we need KEY/ACL edge and get the key from the vertex that this vertex belongs to + found_key_edge = False + for e in v.edges: + if e.edge_type == EdgeType.KEY: + + self.debug(f" has edge that is a key, check head vertex {e.head_uid}", level=3) + head = self.get_vertex(e.head_uid) + keychain = _get_keychain(head) + + # No need to check if keychain exists. + # At default, it should contain the record bytes if no KEY/ACL edges existed for a vertex. + + self.debug(f" * decrypt {v.uid} with keys {keychain}", level=3) + was_able_to_decrypt = False + + # Try the keys in the keychain. One should be able to decrypt the content. + for key in keychain: + try: + # The edge will contain a single key. 
+ # Adding a key to + self.debug(f" decrypt with key {key}", level=3) + content = dag_crypto.decrypt_aes(e.content, key) + self.debug(f" content {content}", level=3) + v.add_to_keychain(content) + self.debug(f" * vertex {v.uid} keychain is {v.keychain}", level=3) + was_able_to_decrypt = True + found_key_edge = True + break + except (Exception,): + self.debug(f" !! this is not the key", level=3) + + if not was_able_to_decrypt: + + # Flag that the edge is corrupt, flag that the vertex keychain is corrupt, + # and store vertex UID/tail UID. + # If we fail on corrupt keys, then raise exceptions. + e.corrupt = True + v.corrupt = True + self.corrupt_uids.append(v.uid) + if self.fail_on_corrupt: + raise DAGKeyException(f"Could not decrypt vertex {v.uid} keychain for edge path {e.path}") + return [] + + if found_key_edge: + return v.keychain + else: + self.debug(" * using record bytes", level=3) + return [self.key] + + for vertex in self.all_vertices: + if not vertex.has_key: + continue + self.debug(f"vertex {vertex.uid}, {vertex.has_key}, {vertex.has_decrypted_keys}", level=3) + vertex.keychain = _get_keychain(vertex) + self.debug(f" setting keychain to {vertex.keychain}", level=3) + + self.debug("", level=1) + + def _decrypt_data(self): + + """ + Decrypt DATA edges + + At this point, all the vertex should have an encrypted key. + This key is used to decrypt the DATA edge's content. + Walk each vertex and decrypt the DATA edge if there is a DATA edge. + """ + + self.debug(" DECRYPT the dag data", level=1) + for vertex in self.all_vertices: + if not vertex.has_data: + continue + self.debug(f"vertex {vertex.uid}, {vertex.keychain}", level=3) + + for edge in vertex.edges: + if edge.edge_type != EdgeType.DATA: + continue + + # If the vertex/KEY edge that tail is this vertex is corrupt, we cannot decrypt data. + if vertex.corrupt: + self.debug(f"the key for the DATA edge is corrupt for vertex {vertex.uid}; " + "cannot decrypt data.", level=3) + continue + + if not edge.is_encrypted: + raise ValueError("The content has already been decrypted.") + + content = edge.content + + self.debug(f" * enc safe content {content}", level=3) + self.debug(f" * enc {content}, enc key {vertex.keychain}", level=3) + able_to_decrypt = False + + keychain = vertex.keychain + + # Try the keys in the keychain. One should be able to decrypt the content. + for key in keychain: + try: + edge.content = dag_crypto.decrypt_aes(content, key) + able_to_decrypt = True + self.debug(f" * content {edge.content}", level=3) + break + except (Exception,): + self.debug(f" !! this is not the key", level=3) + + if not able_to_decrypt: + + # If the DATA edge requires encryption, throw error if we cannot decrypt. + if self.data_requires_encryption: + self.corrupt_uids.append(vertex.uid) + raise DAGDataException(f"The data edge {vertex.uid} could not be decrypted.") + + edge.content = content + edge.needs_encryption = False + self.debug(f" * edge is not encrypted or key is incorrect.") + + # Change the flag indicating that the content is in decrypted state. + edge.is_encrypted = False + + self.debug("", level=1) + + def _flag_as_not_modified(self): + + """ + Flag all edges a not modified. + + :return: + """ + + for vertex in self.all_vertices: + for edge in vertex.edges: + edge.modified = False + + def load(self, sync_point: int = 0) -> int: + + """ + Load data from the graph. + + The first step is to recreate the structure of the graph. + The second step is mark vertex as deleted. + The third step is to decrypt the KEY/ACL/DATA edges. 
+ Forth is to flag all edges as not modified. + + :return: The sync point of the graph stream + """ + + # During the load, turn off auto save + self.allow_auto_save = False + + self.debug("== LOAD DAG ========================================================================", level=2) + sync_point = self._load(sync_point) + self.debug(f"sync point is {sync_point}") + self._mark_deletion() + if self.decrypt: + self._decrypt_keychain() + self._decrypt_data() + else: + self.logger.info("the DAG has not been decrypted, the decrypt flag was get to False") + self._flag_as_not_modified() + self.debug("====================================================================================", level=2) + + # We have loaded the graph, enable the ability to use auto save. + self.allow_auto_save = True + + self.last_sync_point = sync_point + + return sync_point + + def _make_delta_graph(self, duplicate_data: bool = True): + + self.debug("DELTA GRAPH", level=3) + modified_vertices = [] + for vertex in self.all_vertices: + found_modification = False + for edge in vertex.edges: + if edge.skip_on_save: + continue + if edge.modified: + found_modification = True + break + if found_modification: + modified_vertices.append(vertex) + if len(modified_vertices) == 0: + self.debug("nothing has been modified") + return + + self.debug(f"has {len(modified_vertices)} vertices", level=3) + + def _flag(v: DAGVertex): + + self.debug(f"check vertex {v.uid}", level=3) + if v.uid == self.uid: + self.debug(f" FOUND ROOT", level=3) + return True + + # Check if we have any of these edges in this order. + found_path = False + for edge_type in [EdgeType.KEY, EdgeType.ACL, EdgeType.LINK]: + seen = {} + for e in v.edges: + self.debug(f" checking {e.edge_type}, {v.uid} to {e.head_uid}", level=3) + is_deletion = None + if e.edge_type == edge_type: + self.debug(f" found {edge_type}", level=3) + next_vertex = self.get_vertex(e.head_uid) + + if is_deletion is None: + + # If the most recent edge a DELETION edge? + version, highest_edge = v.get_highest_edge_version(next_vertex.uid) + is_deletion = highest_edge.edge_type == EdgeType.DELETION + if is_deletion: + self.debug(f" highest deletion edge. will not mark any edges as modified", + level=3) + + found_path = _flag(next_vertex) + if found_path is True and seen.get(e.head_uid) is None: + self.debug(f" setting {v.uid}, {edge_type} active", level=3) + if not is_deletion: + e.modified = True + seen[e.head_uid] = True + else: + self.debug(f" edge is not {edge_type}", level=3) + + if found_path is True: + break + + # If we found a path, we may need to duplicate the DATA edge. + if found_path is True and duplicate_data is True: + for e in v.edges: + if e.edge_type == EdgeType.DATA: + e.modified = True + break + + return found_path + + self.logger.debug("BEGIN delta graph edge detection") + for modified_vertex in modified_vertices: + _flag(modified_vertex) + self.logger.debug("END delta graph edge detection") + + def save(self, confirm: bool = False, delta_graph: bool = False): + + """ + Save the graph + + We will not save if using the default graph. + + The save process will only save edges that have been flagged as modified, or are newly added. + The process will get the edges from all vertices. + The UID of the vertex is the tail UID of the edge. + For DATA edges, the key (first key in the keychain) will be used for encryption. + + If the web service takes too long or hangs, the batch_count can be used to reduce the amount the web service + needs to handle per request. 
If set to None or non-postivie value, it will not send in batches. + + :param confirm: Confirm save. + Only need this when deleting all vertices. + :param delta_graph: Make a standalone graph from the modifications. + Use sync points to load this graph. + + :return: + """ + + self.debug("== SAVE GRAPH ========================================================================", level=2) + + if self.is_corrupt: + self.logger.error(f"the graph is corrupt, there are problem UIDs: {','.join(self.corrupt_uids)}") + raise DAGCorruptException(f"Cannot save. Graph steam uid {self.uid}, " + f"graph {self.write_endpoint}:{self.graph_id} " + f"has corrupt vertices: {','.join(self.corrupt_uids)}") + + root_vertex = self.get_vertex(self.uid) + if root_vertex is None: + raise DAGVertexException("Cannot save. Could not find the root vertex.") + + if root_vertex.vertex_type != RefType.PAM_NETWORK and root_vertex.vertex_type != RefType.PAM_USER: + raise DAGVertexException("Cannot save. Root vertex type needs to be PAM_NETWORK or PAM_USER.") + + # Do we need to the 'confirm' parameter set to True? + # This is needed if the entire graph is being deleted. + if self.need_save_confirm is True and confirm is False: + raise DAGConfirmException("Cannot save. Confirmation is required.") + self.need_save_confirm = False + + if delta_graph: + self._make_delta_graph() + + data_list = [] + + def _add_data(vertex): + self.debug(f"processing vertex {vertex.uid}, key {vertex.key}, type {vertex.vertex_type}", level=3) + # The vertex UID and edge tail UID + uid = vertex.uid + for edge in vertex.edges: + + if edge.skip_on_save: + continue + + self.debug(f" * edge {edge.edge_type.value}, head {edge.head_uid}, tail {vertex.uid}", level=3) + + # If this edge is not modified, don't add to the data list to save. + if not edge.modified: + self.debug(f" not modified, not saving.", level=3) + continue + + content = edge.content + + # If we are decrypting the edge data, then we want to encrypt it when we save. + # Else, save the content as it is. + if self.decrypt: + if edge.edge_type == EdgeType.DATA: + self.debug(f" edge is data, encrypt data: {edge.needs_encryption}", level=3) + if isinstance(content, dict): + content = json.dumps(content).encode() + if isinstance(content, str): + content = content.encode() + + # If individual edges require encryption or all DATA edge require encryption, then encrypt + if edge.needs_encryption is True or self.data_requires_encryption is True: + self.debug(f" content {edge.content}, enc key {vertex.key}", level=3) + content = dag_crypto.encrypt_aes(content, vertex.key) + self.debug(f" enc content {content}", level=3) + + self.debug(f" enc safe content {content}", level=3) + elif edge.edge_type == EdgeType.KEY: + self.debug(f" edge is key or acl, encrypt key", level=3) + head_vertex = self.get_vertex(edge.head_uid) + key = head_vertex.key + if key is None: + self.debug(f" the edges head vertex {edge.head_uid} did not have a key. " + "using root dag key.", level=3) + key = self.key + self.debug(f" key {vertex.key}, enc key {key}", level=3) + content = dag_crypto.encrypt_aes(vertex.key, key) + elif edge.edge_type == EdgeType.ACL: + content = edge.content + else: + self.debug(f" edge is {edge.edge_type}", level=3) + + parent_vertex = self.get_vertex(edge.head_uid) + + if content is not None and len(content) > 65_535: + self.debug(f" !! vertex {vertex.uid} data edge is {len(content)}, over 64K.") + raise DAGDataException(f"vertex {vertex.uid} DATA edge is {len(content)} bytes. 
" + "This is too large for the MySQL BLOB (64K).") + + dag_data = self.write_struct_obj.data( + data_type=edge.edge_type, + content=content, + tail_uid=uid, + tail_ref_type=vertex.vertex_type, + tail_name=vertex.name if self.is_dev is True else None, + head_uid=edge.head_uid, + head_ref_type=parent_vertex.vertex_type, + head_name=parent_vertex.name if self.is_dev is True else None, + path=edge.path) + + data_list.append(dag_data) + + # Flag that this edge is no longer modified. + edge.modified = False + + # Add the root vertex first + _add_data(self.get_root) + + # Add the rest. + # Only add is the skip_save is False. + for v in self.all_vertices: + if v.skip_save is False: + if v.uid != self.uid: + _add_data(v) + + # Save the keys before the data. + # This is done to make sure the web service can figure out the stream id. + # By saving the keys before data, the structure of the graph is formed. + if len(data_list) > 0: + + if self.debug_level >= 4: + + self.debug("EDGE LIST") + self.debug("##############################################") + for data in data_list: + self.debug(f"{data.ref.value} -> {data.parentRef.value} ({data.type})") + self.debug("##############################################") + + self.debug(f"total list has {len(data_list)} items", level=0) + self.debug(f"batch {self.save_batch_count} edges", level=0) + + batch_num = 0 + while len(data_list) > 0: + + # If using batch add, then take the first batch_count items. + # Remove them from the data list + if self.save_batch_count > 0: + batch_list = data_list[:self.save_batch_count] + data_list = data_list[self.save_batch_count:] + + # Else take everything and clear the data list (else infinite loop) + else: + batch_list = data_list + data_list = [] + + # Little sanity check + if len(batch_list) == 0: + break + + self.debug(f"adding {len(batch_list)} edges, batch {batch_num}", level=0) + + payload = self.write_struct_obj.payload( + origin_ref=self.write_struct_obj.origin_ref( + origin_ref_value=self.origin_ref_value, + name=self.name if self.is_dev is True else None + ), + data_list=batch_list, + graph_id=self.graph_id + ) + + try: + self.conn.add_data(payload, + graph_id=self.graph_id, + endpoint=self.write_endpoint, + agent=self.agent) + except Exception as err: + self.logger.error(f"could not add data to graph for batch {batch_num}: {err}") + raise err + + batch_num += 1 + + # It's a POST that returns no data + else: + self.debug("data list was empty, not saving.", level=2) + + self.debug("====================================================================================", level=2) + + def do_auto_save(self): + # If allow_auto_save is False, we will not allow auto saving. + # On newly created graph, this will happen if the root vertex has not been connected. + # The root vertex/disconnect edge head is needed to get a proper stream ID. + if not self.allow_auto_save: + self.debug("cannot auto_save, allow_auto_save is False.", level=3) + return + if self.auto_save: + self.debug("... dag auto saving", level=1) + self.save() + + def add_vertex(self, name: Optional[str] = None, uid: Optional[str] = None, keychain: Optional[List[bytes]] = None, + vertex_type: RefType = RefType.GENERAL) -> DAGVertex: + + """ + Add a vertex to the graph. + + :param name: Name for the vertex. + :param uid: String unique identifier. + It's a 16bit hex value that is base64 encoded. + :param keychain: List if key bytes to use for encryption/description. This is set by the load/save method. + :param vertex_type: A RefType enumeration type. 
If blank, it will default to GENERAL. + :return: + """ + + if name is None: + name = uid + + vertex = DAGVertex( + name=name, + dag=self, + uid=uid, + keychain=keychain, + vertex_type=vertex_type + ) + if self.vertex_exists(vertex.uid): + raise DAGVertexAlreadyExistsException(f"Vertex {vertex.uid} already exists.") + + # Set the UID to array index lookup. + # This is where the vertex will be in the vertices list. + # Then append the vertex to the vertices list. + self._uid_lookup[vertex.uid] = len(self._vertices) + self._vertices.append(vertex) + + return vertex + + @property + def is_modified(self) -> bool: + for vertex in self.all_vertices: + for edge in vertex.edges: + if edge.modified is True: + return True + return False + + @property + def modified_edges(self): + edges = [] + for vertex in self.all_vertices: + for edge in vertex.edges: + if edge.modified is True: + edges.append(edge) + return edges + + def delete(self): + """ + Delete the entire graph. + + This will delete all the vertex, which will delete all the edges. + This will not automatically save. + The save method will need to be called. + The save will require the 'confirm' parameter to be set to True. + :return: + """ + for vertex in self.vertices: + vertex.delete() + self.need_save_confirm = True + + def _search(self, content: Any, value: QueryValue, ignore_case: bool = False): + + if isinstance(value, dict): + # If the object is not a dictionary, then it's not match + if not isinstance(content, dict): + return False + for next_key, next_value in value.items(): + if next_key not in content: + return False + if not self._search(content=content[next_key], + value=next_value, + ignore_case=ignore_case): + return False + return True + elif isinstance(value, list): + # If the object is not a dictionary, then it's not match + for next_value in value: + if self._search(content=content, + value=next_value, + ignore_case=ignore_case): + return True + return False + else: + content = str(content) + value = str(value) + if ignore_case: + content = content.lower() + value = value.lower() + + return value in content + + def search_content(self, query, ignore_case: bool = False): + results = [] + for vertex in self.vertices: + if vertex.has_data is False or vertex.active is False: + continue + content = vertex.content + if isinstance(query, bytes): + if query == content: + results.append(vertex) + elif isinstance(query, str): + try: + content = content.decode() + if query in content: + results.append(vertex) + continue + + except (Exception,): + pass + elif isinstance(query, dict): + try: + content = content.decode() + content = json.loads(content) + search_result = self._search(content, value=query, ignore_case=ignore_case) + if search_result: + results.append(vertex) + except (Exception,): + pass + else: + raise ValueError("Query is not an accepted type.") + return results + + def walk_down_path(self, path: Union[str, List[str]]) -> Optional[DAGVertex]: + + """ + Walk the vertices using the path and return the vertex starting at root vertex. + + :param path: An array of path string, or string where the path is joined with a "/" (i.e., think URL) + :return: DAGVertex is the path completes, None is failure. + """ + + self.debug("walking path starting at the root vertex", level=2) + vertex = self.get_vertex(self.uid) + return vertex.walk_down_path(path) + + def edge_count(self, only_active: bool = True, only_modified: bool = False) -> int: + """ + Return number of edges in graph. 
+
+        Edges that have been flagged as dups will not be included.
+
+        :param only_active: Default True. If True, only edges that are active (not DELETION) are counted.
+        :param only_modified: Default False. If True, only edges that are new or have been modified are counted.
+        :return:
+        """
+
+        count = 0
+        for v in self.all_vertices:
+            for e in v.edges:
+                if e.skip_on_save:
+                    continue
+                if (only_active and not e.active) or (only_modified and not e.modified):
+                    continue
+                count += 1
+
+        return count
+
+    def to_dot(self, graph_format: str = "svg", show_hex_uid: bool = False,
+               show_version: bool = True, show_only_active: bool = False):
+
+        """
+        Generate a graphviz Digraph in DOT format that is marked up.
+
+        :param graph_format:
+        :param show_hex_uid:
+        :param show_version:
+        :param show_only_active:
+        :return:
+        """
+
+        try:
+            mod = importlib.import_module("graphviz")
+        except ImportError:
+            raise Exception("Cannot run to_dot(); the graphviz module is not installed.")
+
+        dot = getattr(mod, "Digraph")(comment=f"GraphSync for {self.name}", format=graph_format)
+        dot.attr(rankdir='BT')
+
+        for v in self._vertices:
+            if show_only_active is True and v.active is False:
+                continue
+            if not v.corrupt:
+                fillcolor = "white"
+                if not v.active:
+                    fillcolor = "grey"
+                label = f"uid={v.uid}"
+                if v.name is not None and v.name != v.uid:
+                    label += f"\\nname={v.name}"
+                if show_hex_uid:
+                    label += f"\\nhex={dag_crypto.urlsafe_str_to_bytes(v.uid).hex()}"
+            else:
+                fillcolor = "red"
+                label = f"{v.uid} (CORRUPT)"
+
+            dot.node(v.uid, label, fillcolor=fillcolor, style="filled")
+            for edge in v.edges:
+                if edge.skip_on_save:
+                    continue
+
+                if not edge.corrupt:
+                    color = "grey"
+                    style = "solid"
+
+                    # To reduce the number of edges, only show the active edges
+                    if edge.active:
+                        color = "black"
+                        style = "bold"
+                    elif show_only_active:
+                        continue
+
+                    # If the vertex is not active, gray out the DATA edge
+                    if not edge.active:
+                        color = "grey"
+
+                    if edge.edge_type == EdgeType.DELETION:
+                        style = "dotted"
+
+                    label = DAG.EDGE_LABEL.get(edge.edge_type)
+                    if label is None:
+                        label = "UNK"
+                    if edge.path is not None and edge.path != "":
+                        label += f"\\npath={edge.path}"
+                    if show_version:
+                        label += f"\\ne{edge.version}"
+                    # tail, head (arrow side), label
+                else:
+                    color = "red"
+                    style = "solid"
+                    label = f"{DAG.EDGE_LABEL.get(edge.edge_type)} (CORRUPT)"
+
+                dot.edge(v.uid, edge.head_uid, label, style=style, fontcolor=color, color=color)
+
+        return dot
+
+    def to_dot_raw(self, graph_format: str = "svg", sync_point: int = 0, rank_dir="BT"):
+
+        """
+        Generate a graphviz Digraph in DOT format that is not (heavily) marked up.
+ + :param graph_format: + :param sync_point: + :param rank_dir: + :return: + """ + + try: + mod = importlib.import_module("graphviz") + except ImportError: + raise Exception("Cannot to_dot(), graphviz module is not installed.") + + dot = getattr(mod, "Digraph")(comment=f"GraphSync for {self.name}", format=graph_format) + dot.attr(rankdir=rank_dir) + + all_data, sync_point = self._sync(sync_point=sync_point) + + for edge in all_data: + edge_type = edge.type + tail_uid = edge.ref.value + dot.node(tail_uid, tail_uid) + if edge.parentRef is not None: + head_uid = edge.parentRef.value + dot.edge(tail_uid, head_uid, edge_type) + else: + dot.edge(tail_uid, tail_uid, edge_type) + return dot diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_crypto.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_crypto.py new file mode 100644 index 00000000..890e23ca --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_crypto.py @@ -0,0 +1,52 @@ + +import base64 +from typing import Optional, Union +from cryptography.hazmat.primitives.ciphers.aead import AESGCM +import os + + +def encrypt_aes(data: bytes, key: bytes, iv: bytes = None) -> bytes: + aesgcm = AESGCM(key) + iv = iv or os.urandom(12) + enc = aesgcm.encrypt(iv, data, None) + return iv + enc + + +def decrypt_aes(data: bytes, key: bytes) -> bytes: + aesgcm = AESGCM(key) + return aesgcm.decrypt(data[:12], data[12:], None) + + +def bytes_to_urlsafe_str(b: Union[str, bytes]) -> str: + """ + Convert bytes to a URL-safe base64 encoded string. + + Args: + b (bytes): The bytes to be encoded. + + Returns: + str: The URL-safe base64 encoded representation of the input bytes. + """ + if isinstance(b, str): + b = b.encode() + + return base64.urlsafe_b64encode(b).decode().rstrip('=') + + +def generate_random_bytes(length: int) -> bytes: + return os.urandom(length) + + +def generate_uid_bytes(length: int = 16) -> bytes: + return generate_random_bytes(length) + + +def generate_uid_str(uid_bytes: Optional[bytes] = None) -> str: + if uid_bytes is None: + uid_bytes = generate_uid_bytes() + return bytes_to_urlsafe_str(uid_bytes) + + +def urlsafe_str_to_bytes(s: str) -> bytes: + b = base64.urlsafe_b64decode(s + '==') + return b \ No newline at end of file diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_edge.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_edge.py new file mode 100644 index 00000000..408c67ea --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_edge.py @@ -0,0 +1,285 @@ +import json +import logging +from typing import Optional, Union, Any, TYPE_CHECKING + +import pydantic +from ..keeper_dag.dag_types import EdgeType +from ..keeper_dag.exceptions import DAGContentException + +if TYPE_CHECKING: + from ..keeper_dag.dag_vertex import DAGVertex + +class DAGEdge: + def __init__(self, + vertex: "DAGVertex", + edge_type: EdgeType, + head_uid: str, + version: int = 0, + content: Optional[Any] = None, + path: Optional[str] = None, + modified: bool = True, + block_content_auto_save: bool = False, + is_serialized: bool = False, + is_encrypted: bool = False, + needs_encryption: bool = False): + """ + Create an instance of DAGEdge. + + A primary key of the edge the vertex UID, the head UID, and edge_type. + + :param vertex: The DAGVertex instance that owns these edges. + :param edge_type: The enumeration EdgeType. Indicate the type of the edge. + :param head_uid: The vertex uid that has this edge's vertex. The vertex uid that the edge arrow points at. 
+        :param version: Version of this edge.
+        :param content: The content of this edge.
+        :param path: Short tag about this edge.
+        :param modified: Flag the edge as modified; modified edges are included in the next save.
+        :param block_content_auto_save: Block the auto save normally triggered when content is set.
+        :param is_serialized: From the load, is the content serialized to a base64 string?
+        :param is_encrypted: Is the content currently encrypted?
+        :param needs_encryption: Flag to indicate if the content needs to be encrypted.
+        :return: An instance of DAGEdge
+        """
+
+        # This is the vertex that owns this edge.
+        self.vertex = vertex
+        self.edge_type = edge_type
+        self.head_uid = head_uid
+
+        # Flag to indicate if the edge has been modified. Used to determine if the edge should be part of saved data.
+        # Set this before setting the content, else setting the content will cause an auto save.
+        self._modified = None
+        self.modified = modified
+
+        # Should this edge be skipped when saving?
+        # This could happen if we create a duplicate new or modified edge.
+        # We want to only save the newest duplicated edge, so skip prior ones.
+        self.skip_on_save: bool = False
+
+        # Block auto save in the content setter.
+        # When creating an edge, don't save until the edge is added to the edge list.
+        self.block_content_auto_save = block_content_auto_save
+
+        # Does this edge's content need encryption?
+        self.needs_encryption = needs_encryption
+
+        # If the content is being populated from the load() method, and the edge type is a KEY or DATA, then the
+        # content will be encrypted (str).
+        # We want to keep a str, unless KEYs are decrypted.
+
+        # If the edge data needs encryption, this tracks whether _content is currently encrypted.
+        self.is_encrypted = is_encrypted
+
+        # If the content could not be decrypted, this is set to True.
+        self.corrupt = False
+
+        # Is the content base64 encoded?
+        # For JSON non-DATA edges, it will be deserialized.
+        # For JSON DATA edges, it will be serialized and the decryption will deserialize it.
+        # For Protobuf all edges are deserialized; Protobuf does not serialize bytes.
+        self.is_serialized = is_serialized
+
+        self._content = None  # type: Optional[Any]
+        self.content = content
+        self.path = path
+
+        self.version = version
+
+        # If a higher version edge exists, this will be False.
+        # If True, this is the highest edge.
+        self.active = True
+
+    def __str__(self) -> str:
+        return f"<DAGEdge {self.edge_type} to {self.head_uid}, version {self.version}>"
+
+    def debug(self, msg, level=0):
+        self.vertex.dag.debug(msg, level=level)
+
+    @property
+    def modified(self):
+        return self._modified
+
+    @modified.setter
+    def modified(self, value):
+        if value is True:
+            self.debug(f"vertex {self.vertex.uid}, type {self.vertex.dag.__class__.EDGE_LABEL.get(self.edge_type)}, "
+                       f"head {self.head_uid} has been modified", level=5)
+        else:
+            self.debug(f"vertex {self.vertex.uid}, type {self.vertex.dag.__class__.EDGE_LABEL.get(self.edge_type)}, "
+                       f"head {self.head_uid} had modified RESET", level=5)
+        self._modified = value
+
+    @property
+    def content(self) -> Optional[Union[str, bytes]]:
+        """
+        Get the content of the edge.
+
+        If the content is a str, then the content is encrypted.
+        """
+
+        return self._content
+
+    @property
+    def content_as_dict(self) -> Optional[dict]:
+        """
+        Get the content from the DATA edge as a dictionary.
+        :return: Content as a dictionary.
+        """
+
+        if self.is_encrypted:
+            raise DAGContentException("The content is still encrypted.")
+
+        content = self.content
+        if content is not None:
+            try:
+                content = json.loads(content)
+            except Exception as err:
+                raise DAGContentException(f"Cannot decode JSON. Is the content a dictionary? 
: {err}") + return content + + @property + def content_as_str(self) -> Optional[str]: + """ + Get the content from the DATA edge as string + :return: + """ + + if self.is_encrypted: + raise DAGContentException("The content is still encrypted.") + + content = self.content + try: + content = content.decode() + except (Exception,): + pass + return content + + def content_as_object(self, + meta_class: pydantic._internal._model_construction.ModelMetaclass) -> Optional[pydantic.BaseModel]: + """ + Get the content as a pydantic based object. + + :param meta_class: The class to return + :return: + """ + + if self.is_encrypted: + raise DAGContentException("The content is still encrypted.") + + content = self.content_as_str + if content is not None: + content = meta_class.model_validate_json(self.content_as_str) + return content + + @content.setter + def content(self, value: Any): + + """ + Set the content in the edge. + + The content should be stored as bytes. + If the encrypted flag is set, the content will be stored as is. + Content that is a str type is encrypted data (A Base64, AES encrypted bytes, str) + """ + + self.debug(f"vertex {self.vertex.uid}, type {self.vertex.dag.__class__.EDGE_LABEL.get(self.edge_type)}, " + f"head {self.head_uid} setting content", level=2) + + # If the data is encrypted, set it. + # Don't try to make it bytes. + # Also don't set the modified flag to True. + if self.is_encrypted: + self.debug(" content is encrypted.", level=3) + self._content = value + return + + if self._content is not None: + raise DAGContentException("Cannot update existing content. Use add_data() to change the content.") + + if isinstance(value, dict): + value = json.dumps(value) + + # Is this a Pydantic based class? + if hasattr(value, "model_dump_json"): + value = value.model_dump_json() + + if isinstance(value, str): + value = value.encode() + + self._content = value + + def delete(self): + """ + Delete the edge. + + Deleting an edge does not remove the existing edge. + It will create another edge with the same tail and head, but will be type DELETION. + """ + + # If already inactive, return + if not self.active: + return + + version, _ = self.vertex.get_highest_edge_version(head_uid=self.head_uid) + + # Flag all other edges as inactive. + for edge in self.vertex.edges: + edge.active = False + if self.vertex.dag.dedup_edge and edge.modified: + edge.skip_on_save = True + + # Check if the all the edges for this vertex are all newly added/modified for the session. + # If they are all new, then we don't need to delete anything; we just don't add the edges. + all_modified_edges = True + for edge in self.vertex.edges: + if not edge.modified: + all_modified_edges = False + break + + # If not all the edges are modified, then we actually want to add a deletion edge. + # There is actually something to delete. + if not all_modified_edges: + self.vertex.edges.append( + DAGEdge( + vertex=self.vertex, + edge_type=EdgeType.DELETION, + head_uid=self.head_uid, + version=version + 1 + ) + ) + + # Perform the DELETION edges save in one batch. + # Get the current allowed auto save state, and disable auto save. + current_allow_auto_save = self.vertex.dag.allow_auto_save + self.vertex.dag.allow_auto_save = False + + if not self.vertex.belongs_to_a_vertex: + self.vertex.delete(ignore_vertex=self.vertex) + + self.vertex.dag.allow_auto_save = current_allow_auto_save + self.vertex.dag.do_auto_save() + + @property + def is_deleted(self) -> bool: + """ + Does this edge have a DELETION edge that has the same head? 
+
+        This should be used to check if a non-DELETION edge has a matching DELETION edge.
+        :return:
+        """
+
+        # We shouldn't be checking the DELETION edge if it is deleted.
+        # Log an info message to make sure the coder knows their code might be doing something foolish.
+        if self.edge_type == EdgeType.DELETION:
+            logging.info(f"is_deleted() was called on a DELETION edge "
+                         f"for vertex {self.vertex.uid}, head UID {self.head_uid}. Returned True, but code should "
+                         "not be checking this edge.")
+            return True
+
+        # Check the other edges for this vertex for an active DELETION-edge type.
+        for edge in self.vertex.edges:
+            if edge.edge_type == EdgeType.DELETION and edge.head_uid == self.head_uid and edge.active is True:
+                return True
+
+        return False
diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_types.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_types.py
new file mode 100644
index 00000000..d5f45dc5
--- /dev/null
+++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_types.py
@@ -0,0 +1,159 @@
+
+from enum import Enum
+from pydantic import BaseModel
+from typing import List, Optional, Union
+
+
+class BaseEnum(Enum):
+
+    @classmethod
+    def find_enum(cls, value: Union[Enum, str, int], default: Optional[Enum] = None):
+        if value is not None:
+            for e in cls:
+                if e == value or e.value == value:
+                    return e
+            if hasattr(cls, str(value).upper()):
+                return getattr(cls, str(value).upper())
+        return default
+
+
+class PamGraphId(BaseEnum):
+    PAM = 0
+    DISCOVERY_RULES = 10
+    DISCOVERY_JOBS = 11
+    INFRASTRUCTURE = 12
+    SERVICE_LINKS = 13
+
+
+class PamEndpoints(BaseEnum):
+    PAM = "/graph-sync/pam"
+    DISCOVERY_RULES = "/graph-sync/discovery_rules"
+    DISCOVERY_JOBS = "/graph-sync/discovery_jobs"
+    INFRASTRUCTURE = "/graph-sync/infrastructure"
+    SERVICE_LINKS = "/graph-sync/service_links"
+
+
+ENDPOINT_TO_GRAPH_ID_MAP = {
+    PamEndpoints.PAM.value: PamGraphId.PAM.value,
+    PamEndpoints.DISCOVERY_RULES.value: PamGraphId.DISCOVERY_RULES.value,
+    PamEndpoints.DISCOVERY_JOBS.value: PamGraphId.DISCOVERY_JOBS.value,
+    PamEndpoints.INFRASTRUCTURE.value: PamGraphId.INFRASTRUCTURE.value,
+    PamEndpoints.SERVICE_LINKS.value: PamGraphId.SERVICE_LINKS.value,
+}
+
+class SyncQuery(BaseModel):
+    streamId: Optional[str] = None  # base64 of a user's ID who is syncing.
+ deviceId: Optional[str] = None + syncPoint: Optional[int] = None + graphId: Optional[int] = 0 + + + + +class RefType(BaseEnum): + # 0 + GENERAL = "general" + # 1 + USER = "user" + # 2 + DEVICE = "device" + # 3 + REC = "rec" + # 4 + FOLDER = "folder" + # 5 + TEAM = "team" + # 6 + ENTERPRISE = "enterprise" + # 7 + PAM_DIRECTORY = "pam_directory" + # 8 + PAM_MACHINE = "pam_machine" + # 9 + PAM_DATABASE = "pam_database" + # 10 + PAM_USER = "pam_user" + # 11 + PAM_NETWORK = "pam_network" + # 12 + PAM_BROWSER = "pam_browser" + # 13 + CONNECTION = "connetion" + # 14 + WORKFLOW = "workflow" + # 15 + NOTIFICATION = "notification" + # 16 + USER_INFO = "user_info" + # 17 + TEAM_INFO = "team_info" + # 18 + ROLE = "role" + + def __str__(self): + return self.value + + +class EdgeType(BaseEnum): + + """ + DAG data type enum + + * DATA - encrypted data + * KEY - encrypted key + * LINK - like a key, but not encrypted + * ACL - unencrypted set of access control flags + * DELETION - removal of the previous edge at the same coordinates + * DENIAL - an element that was shared through graph relationship, can be explicitly denied + * UNDENIAL - negates the effect of denial, bringing back the share + + """ + DATA = "data" + KEY = "key" + LINK = "link" + ACL = "acl" + DELETION = "deletion" + DENIAL = "denial" + UNDENIAL = "undenial" + + # To store discovery, you would need data and key. To store relationships between records after the discovery + # data was converted, you use Link. + + def __str__(self) -> str: + return str(self.value) + + +class Ref(BaseModel): + type: RefType + value: str + name: Optional[str] = None + + +class DAGData(BaseModel): + type: EdgeType + ref: Ref + parentRef: Optional[Ref] = None + content: Optional[str] = None + path: Optional[str] = None + + +class DataPayload(BaseModel): + origin: Ref + dataList: List + graphId: Optional[int] = 0 + + +class SyncDataItem(BaseModel): + ref: Ref + parentRef: Optional[Ref] = None + content: Optional[str] = None + content_is_base64: bool = True + type: Optional[str] = None + path: Optional[str] = None + deletion: Optional[bool] = False + + +class SyncData(BaseModel): + syncPoint: int + data: List[SyncDataItem] + hasMore: bool diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_utils.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_utils.py new file mode 100644 index 00000000..872544c4 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_utils.py @@ -0,0 +1,13 @@ +def value_to_boolean(value): + value = str(value) + if value.lower() in ['true', 'yes', 'on', '1']: + return True + elif value.lower() in ['false', 'no', 'off', '0']: + return False + else: + return None + + +def kotlin_bytes(data: bytes): + return [b if b < 128 else b - 256 for b in data] + diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_vertex.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_vertex.py new file mode 100644 index 00000000..467760c9 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/dag_vertex.py @@ -0,0 +1,913 @@ +from typing import Optional, Union, List, Any, Tuple, TYPE_CHECKING + +import pydantic +from .dag_types import EdgeType, RefType +from . 
import dag_crypto
+from .dag_edge import DAGEdge
+from .exceptions import DAGVertexException, DAGDeletionException, DAGIllegalEdgeException, DAGKeyException
+
+if TYPE_CHECKING:
+    from .dag import DAG
+
+
+class DAGVertex:
+
+    def __init__(self, dag: "DAG", uid: Optional[str] = None, name: Optional[str] = None,
+                 keychain: Optional[Union[bytes, List[bytes]]] = None, vertex_type: RefType = RefType.GENERAL):
+
+        self.dag = dag
+
+        # If the UID is not set, generate a UID.
+        if uid is None:
+            uid = dag_crypto.generate_uid_str()
+        # Else verify that the UID is valid. The UID should be a 16-byte value that is web-safe base64 serialized.
+        else:
+            if len(uid) != 22:
+                raise ValueError(f"The uid {uid} is not 22 characters in length.")
+            try:
+                b = dag_crypto.urlsafe_str_to_bytes(uid)
+                if len(b) != 16:
+                    raise ValueError("not 16 bytes")
+            except Exception:
+                raise ValueError("The uid does not appear to be a web-safe base64 string containing a 16-byte value.")
+
+        # If the UID is the root UID, make sure the vertex type is not general.
+        # The root vertex needs to be either PAM_NETWORK or PAM_USER; if not, set it to PAM_NETWORK.
+        if uid == self.dag.uid and (vertex_type != RefType.PAM_NETWORK and vertex_type != RefType.PAM_USER):
+            vertex_type = RefType.PAM_NETWORK
+        self.vertex_type = vertex_type
+
+        # If the name is not defined, use the UID. Name is not persistent in the DAG.
+        # If you load the DAG, the web service will not return the name.
+        if name is None:
+            name = uid
+
+        self._uid = uid
+        self._name = name
+
+        # The keychain is a list of keys that can be used.
+        # The keychain may contain multiple keys when loading the default graph (graph_id).
+        # For normal editing, the keychain will contain only one key.
+        self._keychain = []
+        if keychain is not None:
+            if not isinstance(keychain, list):
+                keychain = [keychain]
+            self._keychain += keychain
+
+        # Is the keychain corrupt?
+        self.corrupt = False
+
+        # These are edges to the vertices that own this vertex (i.e., the vertices this vertex belongs to).
+        self.edges: list[Optional[DAGEdge]] = []
+        self.has_uid = []
+
+        # Flag indicating that this vertex is active.
+        # This means this vertex has an active edge connected to another vertex.
+        self.active = True
+
+        # By default, we will save this vertex; not skip_save.
+        # If, in the process of building the graph, it is decided that a vertex should not be saved, this can be set
+        # to prevent the vertex from being saved.
+        self._skip_save = False
+
+    def __str__(self):
+        ret = f"Vertex {self.uid}\n"
+        ret += f"  python instance id: {id(self)}\n"
+        ret += f"  name: {self.name}\n"
+        ret += f"  keychain: {self.keychain}\n"
+        ret += f"  active: {self.active}\n"
+        ret += f"  edges:\n"
+        for edge in self.edges:
+            ret += f"  * type {self.dag.__class__.EDGE_LABEL.get(edge.edge_type)}"
+            ret += f", connect to {edge.head_uid}"
+            ret += f", path {edge.path}"
+            ret += f", active: {edge.active}"
+            ret += f", modified: {edge.modified}"
+            ret += f", content: {'yes' if edge.content is not None else 'no'}"
+            ret += f", content type: {type(edge.content)}"
+            ret += "\n"
+        return ret
+
+    def __repr__(self):
+        return f"<DAGVertex {self.uid}>"
+
+    def debug(self, msg: str, level: int = 0):
+        self.dag.debug(msg, level=level)
+
+    @property
+    def name(self) -> str:
+        """
+        Get the name for the vertex.
+
+        If the name is not defined, the UID will be returned.
+        The name is not persistent.
+        If loading a DAG, the name will not be set.
+ + :return: + """ + if self._name is not None: + return self._name + return self._uid + + @property + def key(self) -> Optional[Union[str, bytes]]: + """ + Get a single key from the keychain. + + :return: + """ + + keychain = self.keychain + if len(keychain) > 0: + return self.keychain[0] + + return None + + @property + def skip_save(self): + return self._skip_save + + @skip_save.setter + def skip_save(self, value): + self._skip_save = value + + for vertex in self.has_vertices(): + vertex._skip_save = value + + def add_to_keychain(self, key: Union[str, bytes]): + """ + Add a key to the keychain + + :param key: A decrypted key bytes or encrypted key str + :return: + """ + if key not in self._keychain: + self._keychain.append(key) + + @property + def keychain(self) -> Optional[List[Union[str, bytes]]]: + """ + Get the keychain for the vertex. + + The key is stored on the edges, however, the key belongs to the vertex. + KEY and ACL edges from this vertex will have the same encrypted key. + It is simpler to store the key on the DAGVertex instance. + + The keychain in an array of keys. + When using graph_id = 0, different graphs that have the same UID will + have different keys. + When decrypting DATA edges, each key in the keychain will be tried. + + If the keychain has not been set, check if any edges exist that require a key. + If there are, then generate a random key. + The load process will populate the key. + If the vertex does not have a key in the keychain, it is because this is a newly + added vertex. + + If there are no edges that require a key, then return None. + """ + + # If the vertex is root, then the keychain will be the key bytes. + if self.dag.get_root == self: + self._keychain = [self.dag.key] + + # If the keychain is empty, generate a key for a specific edge type. + elif len(self._keychain) == 0: + for e in self.edges: + if e.edge_type in [EdgeType.KEY, EdgeType.DATA]: + self._keychain.append(dag_crypto.generate_random_bytes(self.dag.__class__.UID_KEY_BYTES_SIZE)) + break + + return self._keychain + + @keychain.setter + def keychain(self, value: List[Union[str, bytes]]): + """ + Set the key in the vertex. + + The save method will use this key for any KEY/ACL edges. + A key of str type means it is encrypted. + """ + self._keychain = value + + @property + def has_decrypted_keys(self) -> Optional[bool]: + """ + Does the vertex have a decrypted keys? + + If the vertex contains a KEY, ACL or DATA edge and if the key is bytes, then the key is decrypted. + If it is a str type, then it is encrypted. + """ + if len(self._keychain) > 0: + for e in self.edges: + if e.edge_type in [EdgeType.KEY, EdgeType.DATA]: + all_decrypted = True + for key in self._keychain: + if not isinstance(key, bytes): + all_decrypted = False + break + return all_decrypted + return None + + @property + def uid(self): + """ + Get the vertex UID. + + Once set, don't allow it to be changed. + """ + return self._uid + + def get_edge(self, vertex: "DAGVertex", edge_type: EdgeType) -> DAGEdge: + high_edge = None + high_version = -1 + for edge in self.edges: + # Get all the edge point at the same vertex. + # Don't include DATA edges. + if edge.head_uid == vertex.uid and edge.edge_type == edge_type: + if edge.version > high_version: + high_version = edge.version + high_edge = edge + return high_edge + + def get_highest_edge_version(self, head_uid: str) -> Tuple[int, Optional[DAGEdge]]: + """ + Find the highest DAGEdge version of all edge types. 
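# A small illustration of the key-type convention used by keychain and
# has_decrypted_keys above: a bytes value is a decrypted key, while a str value is
# still the encrypted (serialized) form. Both values below are purely hypothetical.
keychain = [b"\x00" * 32, "a2V5LWNpcGhlcnRleHQ"]      # one decrypted key, one encrypted key
assert not all(isinstance(k, bytes) for k in keychain)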
+ + :param head_uid: + :return: + """ + + high_edge = None + high_version = -1 + for edge in self.edges: + # Get all the edge point at the same vertex. + # Don't include DATA edges. + if edge.head_uid == head_uid: + if edge.version > high_version: + high_edge = edge + high_version = edge.version + return high_version, high_edge + + def edge_count(self, vertex: "DAGVertex", edge_type: EdgeType) -> int: + """ + Get the number of edges between two vertices. + + :param vertex: + :param edge_type: + :return: + """ + count = 0 + for edge in self.edges: + if edge.head_uid == vertex.uid and edge.edge_type == edge_type: + count += 1 + return count + + def edge_by_type(self, vertex: "DAGVertex", edge_type: EdgeType) -> List[DAGEdge]: + edge_list = [] + for edge in self.edges: + if edge.edge_type == edge_type and edge.head_uid == vertex.uid: + edge_list.append(edge) + return edge_list + + @property + def has_data(self) -> bool: + + """ + Does this vertex contain a DATA edge? + + :return: True if vertex has a DATA edge. + """ + + for item in self.edges: + if item.edge_type == EdgeType.DATA: + return True + return False + + def get_data(self, index: Optional[int] = None) -> Optional[DAGEdge]: + """ + Get data edge + + If the index is None or 0, the latest data edge will be returned. + A positive and negative, non-zero, index will return the same data. + It will be the absolute value of the index from the latest data. + This means the 1 or -1 will return the prior data. + + If there is no data, None is returned. + + :param index: + :return: + """ + + data_list = self.edge_by_type(self, EdgeType.DATA) + data_count = len(data_list) + if data_count == 0: + return None + + # If the index is None, get the latest. + if index is None or index == 0: + index = -1 + # Since -1 is the current, switch index to a negative number and subtract one more. + # For example, 1 means prior, -1 would be the latest, so we need to subtract one to get -2. + elif index > 0: + index *= -1 + index -= 1 + # If already a negative index, just subtract one. + else: + index -= 1 + + try: + data = data_list[index] + except IndexError: + raise ValueError(f"The index is not valid. Currently there are {data_count} data edges") + + return data + + def add_data(self, + content: Any, + is_encrypted: bool = False, + is_serialized: bool = False, + path: Optional[str] = None, + modified: bool = True, + from_load: bool = False, + needs_encryption: bool = True): + + """ + Add a DATA edge to the vertex. + + :param content: The content to store in the DATA edge. + :param is_encrypted: Is the content encrypted? + :param is_serialized: Is the content base64 serialized? + :param path: Simple string tag to identify the edge. + :param modified: Does this modify the content? + By default, adding a DATA edge will flag that the edge has been modified. + If loading, modified will be set to False. + :param from_load: This call is being performed the load() method. + Do not validate adding data. + :param needs_encryption: Default is True. + Does the content need to be encrypted? + """ + + self.debug(f"connect {self.uid} to DATA edge", level=1) + + # Are we trying to add DATA to a deleted vertex? + + if not self.active: + # If deleted, there will not be a KEY to decrypt the data. + # Throw an exception if not from the loading method. + if not from_load: + raise DAGDeletionException("This vertex is not active. Cannot add DATA edge.") + # If from loading, do not add and do not throw an exception. + return + + # Make sure the vertex belongs before auto saving. 
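# A standalone illustration (not SDK code) of the index handling documented in
# get_data() above: None or 0 selects the latest DATA edge, while 1 and -1 both
# select the edge just before it.
def normalize_data_index(index):
    if index is None or index == 0:
        return -1               # latest
    if index > 0:
        return -index - 1       # 1 -> -2, 2 -> -3, ...
    return index - 1            # -1 -> -2, -2 -> -3, ...

versions = ["v0", "v1", "v2", "v3"]                 # oldest .. newest
assert versions[normalize_data_index(None)] == "v3"
assert versions[normalize_data_index(1)] == "v2"
assert versions[normalize_data_index(-1)] == "v2"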
If it does not belong, it's just an orphan right now. + # This only is checked if using this module is used to create the graph. + if self.belongs_to_a_vertex is False and from_load is False: + raise DAGVertexException(f"Before adding data, connect this vertex {self.uid} to another vertex.") + + # Make sure that we have a KEY. + # Allow a DATA edge to be connected to the root vertex, which will not have a KEY edge. + # Or if we are loading, allow out of sync edges. + + if needs_encryption: + found_key_edge = self.dag.get_root == self or from_load is True + if found_key_edge is False: + for edge in self.edges: + if edge.edge_type == EdgeType.KEY: + found_key_edge = True + if found_key_edge is False: + raise DAGKeyException(f"Cannot add DATA edge without a KEY edge for vertex {self.uid}.") + + # Get the prior data, set the version and inactive the prior data. + version = 0 + prior_data = self.get_data() + if prior_data is not None: + version = prior_data.version + 1 + prior_data.active = False + + # Check if DATA has already been created/modified per this session. + # If it has, the prior will be overwritten, no sense on saving this edge. + # If warning is enabled, print a debug message and the stacktrace to we what added the DATA. + if self.dag.dedup_edge and prior_data.modified: + prior_data.skip_on_save = True + if self.dag.dedup_edge_warning: + self.dag.debug("DATA edge added multiple times for session. stacktrace on what did it follows ...") + self.dag.debug_stacktrace() + + # The tail UID is the UID of the vertex. Since data loops back to the vertex, the head UID is the same. + self.edges.append( + DAGEdge( + vertex=self, + edge_type=EdgeType.DATA, + head_uid=self.uid, + version=version, + content=content, + path=path, + modified=modified, + is_serialized=is_serialized, + is_encrypted=is_encrypted, + needs_encryption=needs_encryption + ) + ) + + # If using a history level, we want to remove edges if we exceed the history level. + # The history level is per edge type. + # It's FIFO, so we will remove the first edge type if we exceed the history level. + if self.dag.history_level > 0: + data_count = self.data_count() + while data_count > self.dag.history_level: + for index in range(0, len(self.edges) - 1): + if self.edges[index].edge_type == EdgeType.DATA: + del self.edges[index] + data_count -= 1 + break + + self.dag.do_auto_save() + + def data_count(self): + return self.edge_count(self, EdgeType.DATA) + + def data_delete(self): + + # Get the DATA edge. + # It will be a reference to itself. + data_edge = self.get_edge(self, EdgeType.DATA) + if data_edge is None: + self.debug("cannot delete the data, no data edge exists.") + + data_edge.active = False + + self.belongs_to( + vertex=self, + edge_type=EdgeType.DELETION + ) + self.debug(f"deleted data edge for {self.uid}") + + @property + def latest_data_version(self): + version = -1 + for edge in self.edges: + if edge.edge_type == EdgeType.DATA and edge.version > version: + version = edge.version + return version + + @property + def content(self) -> Optional[Union[str, bytes]]: + """ + Get the content of the active DATA edge. + + If the content is a str, then the content is encrypted. + """ + data_edge = self.get_data() + if data_edge is None: + return None + return data_edge.content + + @property + def content_as_dict(self) -> Optional[dict]: + """ + Get the content from the active DATA edge as a dictionary. + :return: Content as a dictionary. 
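# A standalone sketch of the history_level trimming described above: once the number
# of DATA edges exceeds the configured level, the oldest DATA edges are removed first
# (FIFO), mirroring the loop in add_data().
def trim_data_history(edges, history_level):
    data_count = sum(1 for e in edges if e["type"] == "DATA")
    while history_level > 0 and data_count > history_level:
        for i, e in enumerate(edges):
            if e["type"] == "DATA":
                del edges[i]
                data_count -= 1
                break
    return edges

edges = [{"type": "DATA", "version": v} for v in range(5)]
assert [e["version"] for e in trim_data_history(edges, 3)] == [2, 3, 4]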
+ """ + data_edge = self.get_data() + if data_edge is None: + return None + return data_edge.content_as_dict + + @property + def content_as_str(self) -> Optional[str]: + """ + Get the content from the active DATA edge as a str. + :return: Content as a str. + """ + + data_edge = self.get_data() + if data_edge is None: + return None + return data_edge.content_as_str + + def content_as_object(self, + meta_class: pydantic._internal._model_construction.ModelMetaclass) -> Optional[pydantic.BaseModel]: + """ + Get the content as a pydantic based object. + + :param meta_class: The class to return + :return: + """ + data_edge = self.get_data() + if data_edge is None: + return None + + return data_edge.content_as_object(meta_class) + + @property + def has_key(self) -> bool: + + """ + Does this vertex contain any KEY or ACL edges? + + :return: True if vertex has a KEY or ACL edge. + """ + + for item in self.edges: + if item.edge_type == EdgeType.KEY: + return True + return False + + def belongs_to(self, + vertex: "DAGVertex", + edge_type: EdgeType, + content: Optional[Any] = None, + is_encrypted: bool = False, + path: Optional[str] = None, + modified: bool = True, + from_load: bool = False): + + """ + Connect a vertex to another vertex (as the owner). + + This will create an edge between this vertex and the passed in vertex. + The passed in vertex will own this vertex. + + If the edge_type is a KEY or ACL, data will be treated as a key. If a DATA edge already exists, the + edge_type will be changed to a KEY, if not a KEY or ACL edge_type. + + :param vertex: The vertex has this vertex. + :param edge_type: The edge type that connects the two vertices. + :param content: Data to store as the edges content. + :param is_encrypted: Is the content encrypted? + :param path: Text tag for the edge. + :param modified: Does adding this edge modify the stored DAG? + :param from_load: Is being connected from load() method? + :return: + """ + + self.debug(f"connect {self.uid} to {vertex.uid} with edge type {edge_type.value}", level=1) + + if vertex is None: + raise ValueError("Vertex is blank.") + if self.uid == self.dag.uid and not (edge_type == EdgeType.DATA or edge_type == EdgeType.DELETION): + if not from_load: + raise DAGIllegalEdgeException(f"Cannot create edge to self for edge type {edge_type}.") + self.dag.debug(f"vertex {self.uid} , the root vertex, " + f"attempted to create '{edge_type.value}' edge to self, skipping.") + return + + # Cannot make an edge to the same vertex, unless the edge type is a DELETION. + # Normally an edge to self is a DATA type, use add_data for that. + # A DELETION edge to self is allowed. + # Just means the DATA edge is being deleted. + if self.uid == vertex.uid and not (edge_type == EdgeType.DATA or edge_type == EdgeType.DELETION): + if not from_load: + raise DAGIllegalEdgeException(f"Cannot create edge to self for edge type {edge_type}.") + self.dag.debug(f"vertex {self.uid} attempted to make '{edge_type.value}' to self, skipping.") + return + + # Figure out what version of the edge we are. + + version, version_edge = self.get_highest_edge_version(head_uid=vertex.uid) + + # If the new edge is not DELETION + if edge_type != EdgeType.DELETION: + + # Find the current active edge for this edge type to make it inactive. + current_edge_by_type = self.get_edge(vertex, edge_type) + if current_edge_by_type is not None: + current_edge_by_type.active = False + + # Check if edge has already been created/modified per this session. 
+ # If it has, the prior will be overwritten, no sense on saving this edge. + # If warning is enabled, print a debug message and the stacktrace to we what added the DATA. + if self.dag.dedup_edge and current_edge_by_type.modified: + current_edge_by_type.skip_on_save = True + if self.dag.dedup_edge_warning: + self.dag.debug(f"{edge_type.value.upper()} edge added multiple times for session. " + "stacktrace on what did it follows ...") + self.dag.debug_stacktrace() + + # If we are adding a non-DELETION edge, it will inactivate the DELETION edge. + highest_deletion_edge = self.get_edge(vertex, EdgeType.DELETION) + if highest_deletion_edge is not None: + highest_deletion_edge.active = False + + # For this purpose, only DATA edge are allow to set the is_encrypted flag. + if edge_type != EdgeType.DATA: + is_encrypted = False + + # Should we activate the vertex again? + if not self.active: + + # If the vertex is already inactive, and we are trying to delete, return. + if edge_type == EdgeType.DELETION: + return + + if self.dag.dedup_edge and version_edge.modified: + version_edge.skip_on_save = True + if self.dag.dedup_edge_warning: + self.dag.debug("edge was deleted in session, will not save DELETION edge") + self.dag.debug_stacktrace() + else: + self.dag.debug(f"vertex {self.uid} was inactive; reactivating vertex.") + self.active = True + + # Create and append a new DAGEdge instance. + # Disable the auto saving after the content is changed since the edge has not been appended yet. + # Once the edge is created, disable blocking auto save for content changes. + edge = DAGEdge( + vertex=self, + edge_type=edge_type, + head_uid=vertex.uid, + version=version + 1, + block_content_auto_save=True, + content=content, + is_encrypted=is_encrypted, + path=path, + modified=modified + ) + edge.block_content_auto_save = False + + self.edges.append(edge) + if self.uid not in vertex.has_uid: + vertex.has_uid.append(self.uid) + + self.dag.do_auto_save() + + def belongs_to_root(self, + edge_type: EdgeType, + path: Optional[str] = None): + + """ + Connect the vertex to the root vertex. + + :param edge_type: The type of edge to use for the connection. + :param path: Short tag for this edge. + :return: + """ + + self.debug(f"connect {self.uid} to root", level=1) + + if self.uid == self.dag.uid: + raise DAGIllegalEdgeException("Cannot create edge to self.") + + if not self.active: + raise DAGDeletionException("This vertex is not active. Cannot connect to root.") + + # We are adding the root, we can enable auto save now. + # We can get the correct stream id with an edge to the root vertex. + self.belongs_to(self.dag.get_root, edge_type=edge_type, path=path) + + self.dag.allow_auto_save = True + self.dag.do_auto_save() + + def has_vertices(self, edge_type: Optional[EdgeType] = None, allow_inactive: bool = False, + allow_self_ref: bool = False) -> List["DAGVertex"]: + + """ + Get a list of vertices that belong to this vertex. + :return: List of DAGVertex + """ + + vertices = [] + for uid in self.has_uid: + + # This will remove DATA and DATA that have changed to DELETION edges. + # Prevent looping. + if uid == self.uid and allow_self_ref is False: + continue + + vertex = self.dag.get_vertex(uid) + if edge_type is not None: + edge = vertex.get_edge(self, edge_type=edge_type) + if edge is not None: + vertices.append(vertex) + + # If no edge type was specified, do not return DATA and DELETION. + # Also do not include vertices that are inactive by default. 
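# A hedged usage sketch of the edge-building methods above, modelled on how
# tunnel_graph.py (later in this change) wires vertices together. `dag` is assumed to
# be an already-loaded DAG instance; the UIDs and settings content are illustrative.
from keepersdk.helpers.keeper_dag.dag_types import EdgeType, RefType

machine = dag.get_vertex(machine_uid) or dag.add_vertex(uid=machine_uid, vertex_type=RefType.PAM_MACHINE)
machine.belongs_to_root(EdgeType.LINK)            # the resource hangs off the configuration (root) vertex

user = dag.get_vertex(user_uid) or dag.add_vertex(uid=user_uid, vertex_type=RefType.PAM_USER)
user.belongs_to(machine, EdgeType.ACL, content={"belongs_to": True, "is_admin": False})

# DATA loops back onto the vertex itself; plain JSON content here, so encryption is skipped.
machine.add_data(content={"allowedSettings": {"rotation": True}}, path="meta", needs_encryption=False)
dag.save()
print(machine.content_as_dict)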
+ elif edge_type != EdgeType.DATA and edge_type != EdgeType.DELETION: + if vertex.active is True or allow_inactive is True: + vertices.append(vertex) + + return vertices + + def has(self, vertex: "DAGVertex", edge_type: Optional[EdgeType] = None) -> bool: + + """ + Does this vertex have the passed in vertex? + + :return: True if request vertex belongs to this vertex. + False if it does not. + """ + + vertices = self.has_vertices(edge_type=edge_type) + return vertex in vertices + + def belongs_to_vertices(self) -> List["DAGVertex"]: + """ + Get a list of vertices that this vertex belongs to + :return: + """ + + vertices = [] + for edge in self.edges: + # If the edge is not a DATA or DELETION type, and the edge is the highest version/active + if edge.edge_type != EdgeType.DATA and edge.edge_type != EdgeType.DELETION and edge.active is True: + + # The head will point at the remote vertex. + # If it is active, and not already in the list, add it to the list of vertices this vertex belongs to. + vertex = self.dag.get_vertex(edge.head_uid) + if vertex.active is True and vertex not in vertices: + vertices.append(vertex) + return vertices + + @property + def belongs_to_a_vertex(self) -> bool: + """ + Does this vertex belong to another vertex? + :return: + """ + + # If this is the root vertex, return True. + # Where this is being called should handle operations involving the root vertex. + if self.dag.get_root == self: + return True + + return len(self.belongs_to_vertices()) > 0 + + def disconnect_from(self, vertex: "DAGVertex", path: Optional[str] = None): + + """ + Disconnect this vertex from another vertex. + + This will add a DELETION edge between two vertices. + If the vertex no longer belongs to another vertex, the vertex will be deleted. + + :param vertex: The vertex this vertex belongs to + :param path: an Optional path for the DELETION edge. + :return: + """ + + if vertex is None: + raise ValueError("Vertex is blank.") + + # Flag all the edges as inactive. + for edge in self.edges: + if edge.head_uid == vertex.uid and edge.edge_type: + edge.active = False + + # Add the DELETION edge + self.belongs_to( + vertex=vertex, + edge_type=EdgeType.DELETION, + path=path + ) + + # If all the KEY edges are inactive now, the DATA edge needs to be made inactive. + # There is no longer a KEY edge to decrypt the DATA. + has_active_key_edge = False + for edge in self.edges: + if edge.edge_type == EdgeType.KEY and edge.active is True: + has_active_key_edge = True + break + if not has_active_key_edge: + for edge in self.edges: + if edge.edge_type == EdgeType.DATA: + edge.active = False + + if not self.belongs_to_a_vertex: + self.debug(f"vertex {self.uid} is now not active", level=1) + self.active = False + + def delete(self, ignore_vertex: Optional["DAGVertex"] = None): + + """ + Delete a vertex + + Deleting a vertex will inactivate the vertex. + It will also inactivate any vertices, and their edges, that belong to the vertex. + It will not inactivate a vertex that belongs to multiple vertices. + :return: + """ + + def _delete(vertex, prior_vertex): + + # Do not delete the root vertex + if vertex.uid == self.dag.uid: + self.debug(f" * vertex is root, cannot delete root", level=2) + return + + self.debug(f"> checking vertex {vertex.uid}") + + # Should we ignore a vertex? + # If deleting an edge, we want to ignore the vertex that owns the edge. + # This prevents circular calls. 
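# A hedged sketch of the membership queries above, continuing the previous example:
# has_vertices() lists what a vertex owns, belongs_to_vertices() lists its owners, and
# disconnect_from() records a DELETION edge between the two vertices.
owned_users = machine.has_vertices(edge_type=EdgeType.ACL)
owners = user.belongs_to_vertices()
if machine.has(user, EdgeType.ACL):
    user.disconnect_from(machine)                 # the user vertex may become inactive afterwards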
+ if ignore_vertex is not None and vertex.uid == ignore_vertex.uid: + return + + # Get a list of vertices that belong to this vertex (v) + has_v = vertex.has_vertices() + + if len(has_v) > 0: + self.debug(f" * vertex has {len(has_v)} vertices that belong to it.", level=2) + for v in has_v: + self.debug(f" checking {v.uid}") + _delete(v, vertex) + else: + self.debug(f" * vertex {vertex.uid} has NO vertices.", level=2) + + for e in list(vertex.edges): + if e.edge_type != EdgeType.DATA and (prior_vertex is None or e.head_uid == prior_vertex.uid): + e.delete() + if vertex.belongs_to_a_vertex is False: + self.debug(f" * inactive vertex {vertex.uid}") + vertex.active = False + + self.debug(f"DELETING vertex {self.uid}", level=3) + + # Perform the DELETION edges save in one batch. + # Get the current allowed auto save state, and disable auto save. + current_allow_auto_save = self.dag.allow_auto_save + self.dag.allow_auto_save = False + + _delete(self, None) + + # Restore the allow auto save and trigger auto save() + self.dag.allow_auto_save = current_allow_auto_save + self.dag.do_auto_save() + + def walk_down_path(self, path: Union[str, List[str]]) -> Optional["DAGVertex"]: + + """ + Walk the vertices using the path and return the vertex starting at this vertex. + + :param path: An array of path string, or string where the path is joined with a "/" (i.e., think URL) + :return: DAGVertex is the path completes, None is failure. + """ + + self.debug(f"walking path in vertex {self.uid}", level=2) + + # If the path is str, break it into an array. Get rid of leading / + if isinstance(path, str): + self.debug("path is str, break into array", level=2) + if path.startswith("/"): + path = path[1:] + path = path.split("/") + + # Unshift the path + + current_path = path[0] + path = path[1:] + self.debug(f"current path: {current_path}", level=2) + self.debug(f"path left: {path}", level=2) + + # Check the DATA edges. + # If a DATA edge has the current path, return this vertex. + for edge in self.edges: + if edge.edge_type != EdgeType.DATA: + continue + if edge.path == current_path: + return self + + # Check the vertices that belong to this vertex for edges going to this vertex and the path matches. + for vertex in self.has_vertices(): + self.debug(f"vertex {self.uid} has {vertex.uid}", level=2) + for edge in vertex.edges: + # If the edge matches the current path, the head of the edge is this vertex, a route exists. + if edge.path == current_path and edge.head_uid == self.uid: + # If there is no path left, this is our vertex + if len(path) == 0: + return vertex + # If there is still more path, call vertex to walk more of the path. + else: + return vertex.walk_down_path(path) + return None + + def get_paths(self) -> List[str]: + """ + Get paths from this vertex to vertex owned by this vertex. + :return: List of string paths + """ + + paths = [] + for vertex in self.has_vertices(): + for edge in vertex.edges: + if edge.path is None or edge.path == "": + continue + paths.append(edge.path) + + return paths + + def clean_edges(self): + """ + Recursively clean edges and break circular references. + + This method clears all edge lists and reference tracking to help + Python's garbage collector clean up circular references between + DAG, DAGVertex, and DAGEdge objects. 
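# A hedged usage sketch of walk_down_path(): path segments are matched against the
# `path` tags stored on edges while walking down from the starting vertex. The path
# string here is illustrative; get_root is referenced as a property, as it is in the
# vertex code above.
start_vertex = dag.get_root
target = start_vertex.walk_down_path("machines/db01")    # equivalent to ["machines", "db01"]
if target is None:
    print("no vertex found for that path")
else:
    print("resolved path to vertex", target.uid)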
+ """ + # Recursively clean child vertices first + for vertex in self.has_vertices(): + vertex.clean_edges() + + # Clear all reference lists + self.edges.clear() + self.has_uid.clear() diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/exceptions.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/exceptions.py new file mode 100644 index 00000000..d1ce951c --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/exceptions.py @@ -0,0 +1,80 @@ +from __future__ import annotations +from typing import Any, Optional + + +class DAGException(Exception): + + def __init__(self, msg: Any, uid: Optional[str] = None): + if not isinstance(msg, str): + msg = str(msg) + + self.msg = msg + self.uid = uid + + super().__init__(self.msg) + + def __str__(self): + return self.msg + + def __repr__(self): + return self.msg + + +class DAGKeyIsEncryptedException(DAGException): + pass + + +class DAGDataEdgeNotFoundException(DAGException): + pass + + +class DAGDeletionException(DAGException): + pass + + +class DAGConfirmException(DAGException): + pass + + +class DAGPathException(DAGException): + pass + + +class DAGVertexAlreadyExistsException(DAGException): + pass + + +class DAGContentException(DAGException): + pass + + +class DAGDefaultGraphException(DAGException): + pass + + +class DAGIllegalEdgeException(DAGException): + pass + + +class DAGKeyException(DAGException): + pass + + +class DAGDataException(DAGException): + pass + + +class DAGVertexException(DAGException): + pass + + +class DAGEdgeException(DAGException): + pass + + +class DAGCorruptException(DAGException): + pass + + +class DAGConnectionException(DAGException): + pass diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/__init__.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/__init__.py new file mode 100644 index 00000000..b2b36405 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/__init__.py @@ -0,0 +1,56 @@ +from __future__ import annotations +import logging +from ..dag_types import SyncQuery, Ref, RefType, DAGData, DataPayload, EdgeType +from ....proto import GraphSync_pb2 as gs_pb2 +from pydantic import BaseModel +from typing import Optional, Union, List, TYPE_CHECKING + +if TYPE_CHECKING: # pragma: no cover + Logger = Union[logging.RootLogger, logging.Logger] + + +class SyncResult(BaseModel): + sync_point: int = 0 + data: List[DAGData] = [] + has_more: bool = False + + +class DataStructBase: + + def __init__(self, + logger: Optional[Logger] = None): + + if logger is None: + logger = logging.getLogger() + self.logger = logger + + def sync_query(self, + stream_id: str, + sync_point: int = 0, + graph_id: Optional[int] = None) -> Union[SyncQuery, gs_pb2.GraphSyncQuery]: + pass + + @staticmethod + def origin_ref(origin_uid: str, + name: str) -> Union[Ref, gs_pb2.GraphSyncRef]: + pass + + def data(self, + data_type: EdgeType, + tail_uid: str, + content: Optional[bytes] = None, + head_uid: Optional[str] = None, + tail_name: Optional[str] = None, + head_name: Optional[str] = None, + tail_ref_type: Optional[RefType] = None, + head_ref_type: Optional[RefType] = None, + path: Optional[str] = None) -> Union[DAGData,gs_pb2.GraphSyncData]: + + pass + + @staticmethod + def payload(origin_ref: Union[Ref, gs_pb2.GraphSyncRef], + data_list: List[Union[DAGData, gs_pb2.GraphSyncData]], + graph_id: Optional[int] = None) -> Union[DataPayload, gs_pb2.GraphSyncAddDataRequest]: + + pass diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/default.py 
b/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/default.py new file mode 100644 index 00000000..6f82cf25 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/default.py @@ -0,0 +1,82 @@ +from __future__ import annotations +from . import DataStructBase +from ..dag_types import SyncQuery, Ref, RefType, DAGData, DataPayload, EdgeType, SyncData +from ..dag_crypto import generate_random_bytes, generate_uid_str +from .... import utils +import base64 +from typing import Optional, List + + +class DataStruct(DataStructBase): + + def sync_query(self, + stream_id: str, + sync_point: int = 0, + graph_id: Optional[int] = None) -> SyncQuery: + + return SyncQuery( + streamId=stream_id, + deviceId=base64.urlsafe_b64encode(generate_random_bytes(16)).decode(), + syncPoint=sync_point, + graphId=graph_id + ) + + @staticmethod + def get_sync_result(results: bytes) -> SyncData: + res = SyncData.model_validate_json(results) + return res + + @staticmethod + def origin_ref(origin_ref_value: bytes, + name: str) -> Ref: + + return Ref( + type=RefType.DEVICE, + value=generate_uid_str(uid_bytes=origin_ref_value), + name=name + ) + + def data(self, + data_type: EdgeType, + tail_uid: str, + content: Optional[bytes] = None, + head_uid: Optional[str] = None, + tail_name: Optional[str] = None, + head_name: Optional[str] = None, + tail_ref_type: Optional[RefType] = None, + head_ref_type: Optional[RefType] = None, + path: Optional[str] = None) -> DAGData: + + if content is not None: + content = utils.base64_url_encode(content) + + return DAGData( + type=data_type, + content=content, + # tail point at this vertex, so it uses this vertex's uid. + ref=Ref( + type=tail_ref_type, + value=tail_uid, + name=tail_name + ), + # Head, the arrowhead, points at the vertex this vertex belongs to, the parent. + # Apparently, for DATA edges, the parentRef is allowed to be None. + # Doesn't hurt to send it. + parentRef=Ref( + type=head_ref_type, + value=head_uid, + name=head_name + ), + path=path + ) + + @staticmethod + def payload(origin_ref: Ref, + data_list: List[DAGData], + graph_id: Optional[int] = None) -> DataPayload: + + return DataPayload( + origin=origin_ref, + dataList=data_list, + graphId=graph_id + ) diff --git a/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/protobuf.py b/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/protobuf.py new file mode 100644 index 00000000..3581ab21 --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/keeper_dag/struct/protobuf.py @@ -0,0 +1,193 @@ +from __future__ import annotations +import logging +from typing import Optional, List, Union + +from ....proto import GraphSync_pb2 as gs_pb2 +from ..dag_types import RefType, EdgeType, Ref, SyncData, SyncDataItem, SyncQuery, DataPayload, DAGData +from .. 
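# A hedged sketch of building an add-data payload with the JSON ("default") struct
# defined above. The import paths follow this change's layout; the UIDs and content
# bytes are illustrative only.
from keepersdk.helpers.keeper_dag.struct.default import DataStruct
from keepersdk.helpers.keeper_dag.dag_types import EdgeType, RefType

struct = DataStruct()
origin = DataStruct.origin_ref(origin_ref_value=b"\x01" * 16, name="example-device")
edge = struct.data(
    data_type=EdgeType.DATA,
    tail_uid="abcdefghijklmnopqrstuv",            # 22-character web-safe UID (illustrative)
    head_uid="abcdefghijklmnopqrstuv",            # DATA edges loop back onto the same vertex
    content=b'{"hello": "world"}',                # raw bytes; data() base64url-encodes them
    tail_ref_type=RefType.PAM_MACHINE,
    head_ref_type=RefType.PAM_MACHINE,
)
payload = DataStruct.payload(origin, [edge], graph_id=0)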
import dag_crypto + + +class DataStructBase: + + def __init__(self, + logger: Optional[logging.Logger] = None): + + if logger is None: + logger = logging.getLogger() + self.logger = logger + + def sync_query(self, + stream_id: str, + sync_point: int = 0, + graph_id: Optional[int] = None) -> Union[SyncQuery, gs_pb2.GraphSyncQuery]: + pass + + @staticmethod + def origin_ref(origin_uid: str, + name: str) -> Union[Ref, gs_pb2.GraphSyncRef]: + pass + + def data(self, + data_type: EdgeType, + tail_uid: str, + content: Optional[bytes] = None, + head_uid: Optional[str] = None, + tail_name: Optional[str] = None, + head_name: Optional[str] = None, + tail_ref_type: Optional[RefType] = None, + head_ref_type: Optional[RefType] = None, + path: Optional[str] = None) -> Union[DAGData,gs_pb2.GraphSyncData]: + + pass + + @staticmethod + def payload(origin_ref: Union[Ref, gs_pb2.GraphSyncRef], + data_list: List[Union[DAGData, gs_pb2.GraphSyncData]], + graph_id: Optional[int] = None) -> Union[DataPayload, gs_pb2.GraphSyncAddDataRequest]: + + pass + + +class DataStruct(DataStructBase): + + # https://github.com/Keeper-Security/keeperapp-protobuf/blob/master/GraphSync.proto + + REF_TO_PB_MAP = { + RefType.GENERAL: gs_pb2.RefType.RFT_GENERAL, + RefType.USER: gs_pb2.RefType.RFT_USER, + RefType.DEVICE: gs_pb2.RefType.RFT_DEVICE, + RefType.REC: gs_pb2.RefType.RFT_REC, + RefType.FOLDER: gs_pb2.RefType.RFT_FOLDER, + RefType.TEAM: gs_pb2.RefType.RFT_TEAM, + RefType.ENTERPRISE: gs_pb2.RefType.RFT_ENTERPRISE, + RefType.PAM_DIRECTORY: gs_pb2.RefType.RFT_PAM_DIRECTORY, + RefType.PAM_MACHINE: gs_pb2.RefType.RFT_PAM_MACHINE, + RefType.PAM_DATABASE: gs_pb2.RefType.RFT_PAM_DATABASE, + RefType.PAM_USER: gs_pb2.RefType.RFT_PAM_USER, + RefType.PAM_NETWORK: gs_pb2.RefType.RFT_PAM_NETWORK, + RefType.PAM_BROWSER: gs_pb2.RefType.RFT_PAM_BROWSER, + RefType.CONNECTION: gs_pb2.RefType.RFT_CONNECTION, + RefType.WORKFLOW: gs_pb2.RefType.RFT_WORKFLOW, + RefType.NOTIFICATION: gs_pb2.RefType.RFT_NOTIFICATION, + RefType.USER_INFO: gs_pb2.RefType.RFT_USER_INFO, + RefType.TEAM_INFO: gs_pb2.RefType.RFT_TEAM_INFO, + RefType.ROLE: gs_pb2.RefType.RFT_ROLE + } + + DATA_TO_PB_MAP = { + EdgeType.DATA: gs_pb2.GraphSyncDataType.GSE_DATA, + EdgeType.KEY: gs_pb2.GraphSyncDataType.GSE_KEY, + EdgeType.LINK: gs_pb2.GraphSyncDataType.GSE_LINK, + EdgeType.ACL: gs_pb2.GraphSyncDataType.GSE_ACL, + EdgeType.DELETION: gs_pb2.GraphSyncDataType.GSE_DELETION + } + + PB_TO_REF_MAP = {v: k for k, v in REF_TO_PB_MAP.items()} + PB_TO_DATA_MAP = {v: k for k, v in DATA_TO_PB_MAP.items()} + + def sync_query(self, + stream_id: str, + sync_point: int = 0, + graph_id: Optional[int] = None) -> gs_pb2.GraphSyncQuery: + + return gs_pb2.GraphSyncQuery( + streamId=dag_crypto.urlsafe_str_to_bytes(stream_id), + origin=dag_crypto.generate_random_bytes(16), + syncPoint=sync_point, + + # Use the default from KRouter; currently 500 + maxCount=0 + ) + + @staticmethod + def get_sync_result(results: bytes) -> SyncData: + + try: + result = gs_pb2.GraphSyncResult() + result.ParseFromString(results) + except Exception as err: + raise Exception(f"Could not parse the GraphSyncResult message: {err}") + + message = gs_pb2.GraphSyncResult() + message.ParseFromString(results) + + data_list: List[SyncDataItem] = [] + for item in message.data: + data_list.append( + SyncDataItem( + type=DataStruct.PB_TO_DATA_MAP.get(item.data.type), + # content=bytes_to_str(item.data.content), + content=item.data.content, + content_is_base64=False, + ref=Ref( + type=DataStruct.PB_TO_REF_MAP.get(item.data.ref.type), + 
value=dag_crypto.bytes_to_urlsafe_str(item.data.ref.value), + ), + parentRef=Ref( + type=DataStruct.PB_TO_REF_MAP.get(item.data.parentRef.type), + value=dag_crypto.bytes_to_urlsafe_str(item.data.parentRef.value) + ), + path=item.data.path + ) + ) + + return SyncData( + syncPoint=message.syncPoint, + data=data_list, + hasMore=message.hasMore + ) + + @staticmethod + def origin_ref(origin_ref_value: bytes, + name: str) -> gs_pb2.GraphSyncRef: + + return gs_pb2.GraphSyncRef( + type=gs_pb2.RefType.RFT_DEVICE, + value=origin_ref_value, + name=name + ) + + def data(self, + data_type: EdgeType, + tail_uid: str, + content: Optional[bytes] = None, + head_uid: Optional[str] = None, + tail_name: Optional[str] = None, + head_name: Optional[str] = None, + tail_ref_type: Optional[RefType] = None, + head_ref_type: Optional[RefType] = None, + path: Optional[str] = None) -> gs_pb2.GraphSyncData: + + if isinstance(tail_uid, str): + tail_uid = dag_crypto.urlsafe_str_to_bytes(tail_uid) + if head_uid is not None and isinstance(head_uid, str): + head_uid = dag_crypto.urlsafe_str_to_bytes(head_uid) + + return gs_pb2.GraphSyncData( + type=DataStruct.DATA_TO_PB_MAP.get(data_type), + content=content, + # tail point at this vertex, so it uses this vertex's uid. + ref=gs_pb2.GraphSyncRef( + type=DataStruct.REF_TO_PB_MAP.get(tail_ref_type), + value=tail_uid, + name=tail_name + ), + # Head, the arrowhead, points at the vertex this vertex belongs to, the parent. + # Apparently, for DATA edges, the parentRef is allowed to be None. + # Doesn't hurt to send it. + parentRef=gs_pb2.GraphSyncRef( + type=DataStruct.REF_TO_PB_MAP.get(head_ref_type), + value=head_uid, + name=head_name + ), + path=path + ) + + @staticmethod + def payload(origin_ref: gs_pb2.GraphSyncRef, + data_list: List[gs_pb2.GraphSyncData], + graph_id: Optional[int] = None) -> gs_pb2.GraphSyncAddDataRequest: + + return gs_pb2.GraphSyncAddDataRequest( + origin=origin_ref, + data=data_list) diff --git a/keepersdk-package/src/keepersdk/helpers/pam_config_facade.py b/keepersdk-package/src/keepersdk/helpers/pam_config_facade.py new file mode 100644 index 00000000..ff30295d --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/pam_config_facade.py @@ -0,0 +1,77 @@ + +from ..vault.record_facades import TypedRecordFacade, string_getter, string_list_getter, string_setter +from ..vault import vault_record, record_types + + +class PamConfigurationRecordFacade(TypedRecordFacade): + _controller_uid_getter = string_getter('controllerUid') + _controller_uid_setter = string_setter('controllerUid') + _folder_uid_getter = string_getter('folderUid') + _folder_uid_setter = string_setter('folderUid') + _resource_ref_getter = string_list_getter('resourceRef') + _file_ref_getter = string_getter('_file_ref') + + def __init__(self): + super(PamConfigurationRecordFacade, self).__init__() + self._pam_resources = None + self._port_mapping = None + self._file_ref = None + + def load_typed_fields(self): + if self.record: + self._pam_resources = next((x for x in self.record.fields if x.type == 'pamResources'), None) + if not self._pam_resources: + self._pam_resources = vault_record.TypedField.new_field('pamResources', []) + self.record.fields.append(self._pam_resources) + + if len(self._pam_resources.value) > 0: + if not isinstance(self._pam_resources.value[0], dict): + self._pam_resources.value.clear() + + if len(self._pam_resources.value) == 0: + if 'pamResources' in record_types.FieldTypes and isinstance(record_types.FieldTypes['pamResources'].value, dict): + value = 
record_types.FieldTypes['pamResources'].value.copy() + else: + value = {} + self._pam_resources.value.append(value) + + self._port_mapping = next((x for x in self.record.fields + if x.type == 'multiline' and x.label == 'portMapping'), None) + if self._port_mapping is None: + self._port_mapping = vault_record.TypedField.new_field('multiline', [], field_label='portMapping') + self.record.fields.append(self._port_mapping) + + self._file_ref = next((x for x in self.record.fields if x.type == 'fileRef' and x.label == 'rotationScripts'), None) + if self._file_ref is None: + self._file_ref = vault_record.TypedField.new_field('fileRef', [], field_label='rotationScripts') + self.record.fields.append(self._file_ref) + else: + self._pam_resources = None + self._port_mapping = None + self._file_ref = None + + super(PamConfigurationRecordFacade, self).load_typed_fields() + + @property + def controller_uid(self): + return PamConfigurationRecordFacade._controller_uid_getter(self) + + @controller_uid.setter + def controller_uid(self, value): + PamConfigurationRecordFacade._controller_uid_setter(self, value) + + @property + def folder_uid(self): + return PamConfigurationRecordFacade._folder_uid_getter(self) + + @folder_uid.setter + def folder_uid(self, value): + PamConfigurationRecordFacade._folder_uid_setter(self, value) + + @property + def resource_ref(self): + return PamConfigurationRecordFacade._resource_ref_getter(self) + + @property + def rotation_scripts(self): + return PamConfigurationRecordFacade._file_ref_getter(self) diff --git a/keepersdk-package/src/keepersdk/helpers/tunnel/tunnel_graph.py b/keepersdk-package/src/keepersdk/helpers/tunnel/tunnel_graph.py new file mode 100644 index 00000000..9acc304b --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/tunnel/tunnel_graph.py @@ -0,0 +1,540 @@ +import logging + +from . 
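# A hedged usage sketch of the facade above, assuming the TypedRecordFacade base class
# exposes the wrapped record through a settable `record` attribute (as the checks in
# load_typed_fields() suggest). `pam_config_record` is an illustrative TypedRecord.
facade = PamConfigurationRecordFacade()
facade.record = pam_config_record
facade.load_typed_fields()                        # ensures pamResources / portMapping / rotationScripts exist
facade.controller_uid = "GATEWAY_RECORD_UID"      # illustrative values
facade.folder_uid = "SHARED_FOLDER_UID"
print(facade.resource_ref)                        # UIDs of linked PAM resource records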
import tunnel_utils + +from ..keeper_dag.connection import Connection +from ..keeper_dag.dag_types import EdgeType, RefType, PamEndpoints +from ..keeper_dag.dag import DAG +from ..keeper_dag.dag_vertex import DAGVertex +from ..keeper_dag.dag_crypto import generate_random_bytes + +from ...vault import vault_online, vault_record + +logger = logging.getLogger(__name__) + +def get_vertex_content(vertex): + return_content = None + if vertex is None: + return return_content + try: + return_content = vertex.content_as_dict + except Exception as e: + logger.debug(f"Error getting vertex content: {e}") + return_content = None + return return_content + + +class TunnelDAG: + def __init__(self, vault: vault_online.VaultOnline, encrypted_session_token, encrypted_transmission_key, record_uid: str, is_config=False): + config_uid = None + if not is_config: + config_uid = tunnel_utils.get_config_uid(vault, encrypted_session_token, encrypted_transmission_key, record_uid) + if not config_uid: + config_uid = record_uid + self.record = vault_record.PasswordRecord() + self.record.record_uid = config_uid + self.record.record_key = generate_random_bytes(32) + self.encrypted_session_token = encrypted_session_token + self.encrypted_transmission_key = encrypted_transmission_key + self.conn = Connection(vault=vault, encrypted_transmission_key=self.encrypted_transmission_key, + encrypted_session_token=self.encrypted_session_token, + use_write_protobuf=True + ) + self.linking_dag = DAG(conn=self.conn, record=self.record, graph_id=0, write_endpoint=PamEndpoints.PAM) + try: + self.linking_dag.load() + except Exception as e: + import logging + logging.debug(f"Error loading config: {e}") + + def resource_belongs_to_config(self, resource_uid): + if not self.linking_dag.has_graph: + return False + resource_vertex = self.linking_dag.get_vertex(resource_uid) + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + return resource_vertex and config_vertex.has(resource_vertex, EdgeType.LINK) + + def user_belongs_to_config(self, user_uid): + if not self.linking_dag.has_graph: + return False + user_vertex = self.linking_dag.get_vertex(user_uid) + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + res_content = False + if user_vertex and config_vertex and config_vertex.has(user_vertex, EdgeType.ACL): + acl_edge = user_vertex.get_edge(config_vertex, EdgeType.ACL) + _content = acl_edge.content_as_dict + res_content = _content.get('belongs_to', False) if _content else False + return res_content + + def check_tunneling_enabled_config(self, enable_connections=None, enable_tunneling=None, + enable_rotation=None, enable_session_recording=None, + enable_typescript_recording=None, remote_browser_isolation=None): + if not self.linking_dag.has_graph: + return False + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + content = get_vertex_content(config_vertex) + if content is None or not content.get('allowedSettings'): + return False + + allowed_settings = content['allowedSettings'] + if enable_connections and not allowed_settings.get("connections"): + return False + if enable_tunneling and not allowed_settings.get("portForwards"): + return False + if enable_rotation and not allowed_settings.get("rotation"): + return False + if allowed_settings.get("connections") and allowed_settings["connections"]: + if enable_session_recording and not allowed_settings.get("sessionRecording"): + return False + if enable_typescript_recording and not allowed_settings.get("typescriptRecording"): + return False 
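# A hedged usage sketch of TunnelDAG: the encrypted tokens come from
# tunnel_utils.get_keeper_tokens() (defined later in this change), `vault` is assumed
# to be an open VaultOnline instance, and the record UIDs are illustrative.
from keepersdk.helpers.tunnel.tunnel_graph import TunnelDAG
from keepersdk.helpers.tunnel.tunnel_utils import get_keeper_tokens

encrypted_session_token, encrypted_transmission_key, _ = get_keeper_tokens(vault)
dag = TunnelDAG(vault, encrypted_session_token, encrypted_transmission_key, "RESOURCE_RECORD_UID")

print(dag.resource_belongs_to_config("RESOURCE_RECORD_UID"))
print(dag.user_belongs_to_config("USER_RECORD_UID"))
print(dag.check_tunneling_enabled_config(enable_tunneling=True))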
+ if remote_browser_isolation and not allowed_settings.get("remoteBrowserIsolation"): + return False + return True + + @staticmethod + def _convert_allowed_setting(value): + """Converts on/off/default|any to True/False/None""" + if value is None or isinstance(value, bool): + return value + return {"on": True, "off": False}.get(str(value).lower(), None) + + def edit_tunneling_config(self, connections=None, tunneling=None, + rotation=None, session_recording=None, + typescript_recording=None, + remote_browser_isolation=None): + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + if config_vertex is None: + config_vertex = self.linking_dag.add_vertex(uid=self.record.record_uid, vertex_type=RefType.PAM_NETWORK) + + if config_vertex.vertex_type != RefType.PAM_NETWORK: + config_vertex.vertex_type = RefType.PAM_NETWORK + content = get_vertex_content(config_vertex) + if content and content.get('allowedSettings'): + allowed_settings = dict(content['allowedSettings']) + del content['allowedSettings'] + content = {'allowedSettings': allowed_settings} + + if content is None: + content = {'allowedSettings': {}} + if 'allowedSettings' not in content: + content['allowedSettings'] = {} + + allowed_settings = content['allowedSettings'] + dirty = False + + if connections is not None: + connections = self._convert_allowed_setting(connections) + if connections != allowed_settings.get("connections", None): + dirty = True + if connections is None: + allowed_settings.pop("connections", None) + else: + allowed_settings["connections"] = connections + + if tunneling is not None: + tunneling = self._convert_allowed_setting(tunneling) + if tunneling != allowed_settings.get("portForwards", None): + dirty = True + if tunneling is None: + allowed_settings.pop("portForwards", None) + else: + allowed_settings["portForwards"] = tunneling + + if rotation is not None: + rotation = self._convert_allowed_setting(rotation) + if rotation != allowed_settings.get("rotation", None): + dirty = True + if rotation is None: + allowed_settings.pop("rotation", None) + else: + allowed_settings["rotation"] = rotation + + if session_recording is not None: + session_recording = self._convert_allowed_setting(session_recording) + if session_recording != allowed_settings.get("sessionRecording", None): + dirty = True + if session_recording is None: + allowed_settings.pop("sessionRecording", None) + else: + allowed_settings["sessionRecording"] = session_recording + + if typescript_recording is not None: + typescript_recording = self._convert_allowed_setting(typescript_recording) + if typescript_recording != allowed_settings.get("typescriptRecording", None): + dirty = True + if typescript_recording is None: + allowed_settings.pop("typescriptRecording", None) + else: + allowed_settings["typescriptRecording"] = typescript_recording + + if remote_browser_isolation is not None: + remote_browser_isolation = self._convert_allowed_setting(remote_browser_isolation) + if remote_browser_isolation != allowed_settings.get("remoteBrowserIsolation", None): + dirty = True + if remote_browser_isolation is None: + allowed_settings.pop("remoteBrowserIsolation", None) + else: + allowed_settings["remoteBrowserIsolation"] = remote_browser_isolation + + if dirty: + config_vertex.add_data(content=content, path='meta', needs_encryption=False) + self.linking_dag.save() + + def get_all_owners(self, uid): + owners = [] + if self.linking_dag.has_graph: + vertex = self.linking_dag.get_vertex(uid) + if vertex: + owners = [owner.uid for owner in 
vertex.belongs_to_vertices()] + return owners + + def user_belongs_to_resource(self, user_uid, resource_uid): + user_vertex = self.linking_dag.get_vertex(user_uid) + resource_vertex = self.linking_dag.get_vertex(resource_uid) + res_content = False + if user_vertex and resource_vertex and resource_vertex.has(user_vertex, EdgeType.ACL): + acl_edge = user_vertex.get_edge(resource_vertex, EdgeType.ACL) + _content = acl_edge.content_as_dict + res_content = _content.get('belongs_to', False) if _content else False + return res_content + + def get_resource_uid(self, user_uid): + if not self.linking_dag.has_graph: + return None + resources = self.get_all_owners(user_uid) + if len(resources) > 0: + for resource in resources: + if self.user_belongs_to_resource(user_uid, resource): + return resource + return None + + def link_resource_to_config(self, resource_uid): + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + if config_vertex is None: + config_vertex = self.linking_dag.add_vertex(uid=self.record.record_uid) + + resource_vertex = self.linking_dag.get_vertex(resource_uid) + if resource_vertex is None: + resource_vertex = self.linking_dag.add_vertex(uid=resource_uid) + + if not config_vertex.has(resource_vertex, EdgeType.LINK): + resource_vertex.belongs_to(config_vertex, EdgeType.LINK) + self.linking_dag.save() + + def link_user_to_config(self, user_uid): + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + if config_vertex is None: + config_vertex = self.linking_dag.add_vertex(uid=self.record.record_uid) + self.link_user(user_uid, config_vertex, belongs_to=True, is_iam_user=True) + + def link_user_to_config_with_options(self, user_uid, is_admin=None, belongs_to=None, is_iam_user=None): + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + if config_vertex is None: + config_vertex = self.linking_dag.add_vertex(uid=self.record.record_uid) + + # self.link_user(user_uid, config_vertex, is_admin, belongs_to, is_iam_user) + source_vertex = config_vertex + user_vertex = self.linking_dag.get_vertex(user_uid) + if user_vertex is None: + user_vertex = self.linking_dag.add_vertex(uid=user_uid, vertex_type=RefType.PAM_USER) + + # switching to 3-state on/off/default: on/true, off/false, + # None = Keep existing, 'default' = Reset to default (remove from dict) + states = {'on': True, 'off': False, 'default': '', 'none': None} + + content = { + "belongs_to": states.get(str(belongs_to).lower()), + "is_admin": states.get(str(is_admin).lower()), + "is_iam_user": states.get(str(is_iam_user).lower()) + } + if user_vertex.vertex_type != RefType.PAM_USER: + user_vertex.vertex_type = RefType.PAM_USER + + dirty = False + if source_vertex.has(user_vertex, EdgeType.ACL): + acl_edge = user_vertex.get_edge(source_vertex, EdgeType.ACL) + existing_content = acl_edge.content_as_dict or {} + old_content = existing_content.copy() + for key in list(existing_content.keys()): + if content.get(key) is not None: + if content[key] == '': + existing_content.pop(key) + elif content[key] in (True, False): + existing_content[key] = content[key] + content = {k: v for k, v in content.items() if v not in (None, '')} + for k, v in content.items(): + existing_content.setdefault(k, v) + if existing_content != old_content: + dirty = True + + if dirty: + user_vertex.belongs_to(source_vertex, EdgeType.ACL, content=existing_content) + # user_vertex.add_data(content=existing_content, needs_encryption=False) + self.linking_dag.save() + else: + content = {k: v for k, v in content.items() if v 
not in (None, '')} + user_vertex.belongs_to(source_vertex, EdgeType.ACL, content=content) + self.linking_dag.save() + + def unlink_user_from_resource(self, user_uid, resource_uid) -> bool: + resource_vertex = self.linking_dag.get_vertex(resource_uid) + if resource_vertex is None or not self.resource_belongs_to_config(resource_uid): + logger.error(f"Resource {resource_uid} does not belong to the configuration") + return False + + user_vertex = self.linking_dag.get_vertex(user_uid) + if user_vertex is None or user_vertex.vertex_type != RefType.PAM_USER: + return False + + if resource_vertex.has(user_vertex, EdgeType.ACL): + acl_edge = user_vertex.get_edge(resource_vertex, EdgeType.ACL) + edge_content = acl_edge.content_as_dict or {} + link_keys = ('belongs_to', 'is_admin') # "is_iam_user" + dirty = any(key in link_keys for key in edge_content) + if dirty: + for link_key in link_keys: + edge_content.pop(link_key, None) + user_vertex.belongs_to(resource_vertex, EdgeType.ACL, content=edge_content) + self.linking_dag.save() + return True + + return False + + def link_user_to_resource(self, user_uid, resource_uid, is_admin=None, belongs_to=None): + resource_vertex = self.linking_dag.get_vertex(resource_uid) + if resource_vertex is None or not self.resource_belongs_to_config(resource_uid): + logger.error(f"Resource {resource_uid} does not belong to the configuration") + return False + self.link_user(user_uid, resource_vertex, is_admin, belongs_to) + return None + + def link_user(self, user_uid, source_vertex: DAGVertex, is_admin=None, belongs_to=None, is_iam_user=None): + + user_vertex = self.linking_dag.get_vertex(user_uid) + if user_vertex is None: + user_vertex = self.linking_dag.add_vertex(uid=user_uid, vertex_type=RefType.PAM_USER) + + content = {} + dirty = False + if belongs_to is not None: + content["belongs_to"] = bool(belongs_to) + if is_admin is not None: + content["is_admin"] = bool(is_admin) + if is_iam_user is not None: + content["is_iam_user"] = bool(is_iam_user) + + if user_vertex.vertex_type != RefType.PAM_USER: + user_vertex.vertex_type = RefType.PAM_USER + + if source_vertex.has(user_vertex, EdgeType.ACL): + acl_edge = user_vertex.get_edge(source_vertex, EdgeType.ACL) + existing_content = acl_edge.content_as_dict or {} + for key in existing_content: + if key not in content: + content[key] = existing_content[key] + if content != existing_content: + dirty = True + + if dirty: + user_vertex.belongs_to(source_vertex, EdgeType.ACL, content=content) + self.linking_dag.save() + else: + user_vertex.belongs_to(source_vertex, EdgeType.ACL, content=content) + self.linking_dag.save() + + def get_all_admins(self): + if not self.linking_dag.has_graph: + return [] + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + if config_vertex is None: + return [] + admins = [] + for user_vertex in config_vertex.has_vertices(EdgeType.ACL): + acl_edge = user_vertex.get_edge(config_vertex, EdgeType.ACL) + if acl_edge: + content = acl_edge.content_as_dict + if content.get('is_admin'): + admins.append(user_vertex.uid) + return admins + + def check_if_resource_has_admin(self, resource_uid): + resource_vertex = self.linking_dag.get_vertex(resource_uid) + if resource_vertex is None: + return False + for user_vertex in resource_vertex.has_vertices(EdgeType.ACL): + acl_edge = user_vertex.get_edge(resource_vertex, EdgeType.ACL) + if acl_edge: + content = acl_edge.content_as_dict + if content.get('is_admin'): + return user_vertex.uid + return False + + def check_if_resource_allowed(self, 
resource_uid, setting): + resource_vertex = self.linking_dag.get_vertex(resource_uid) + content = get_vertex_content(resource_vertex) + return content.get('allowedSettings', {}).get(setting, False) if content else False + + def get_resource_setting(self, resource_uid: str, settings_name: str, setting: str) -> str: + # Settings are tri-state (on|off|default) mapped to true|false|missing in JSON + # When set to "default" (missing from JSON) that means look higher up the hierarchy + # ex. rotation: user -> machine -> pam_config -> Gobal Default settings + # Note: Different clients (even different client versions) + # may have different view on these defaults (Commander, Web Vault, etc.) + resource_vertex = self.linking_dag.get_vertex(resource_uid) + content = get_vertex_content(resource_vertex) + res = '' + if content and isinstance(content, dict): + if settings_name in content and isinstance(content[settings_name], dict): + if setting in content[settings_name]: + value = content[settings_name][setting] + if isinstance(value, bool): + res = {True: 'on', False: 'off'}[value] + else: + res = str(value) + else: + res = 'default' + + return res + + def set_resource_allowed(self, resource_uid, tunneling=None, connections=None, rotation=None, + session_recording=None, typescript_recording=None, remote_browser_isolation=None, + allowed_settings_name='allowedSettings', is_config=False, + v_type: RefType=str(RefType.PAM_MACHINE)): + v_type = RefType(v_type) + allowed_ref_types = [RefType.PAM_MACHINE, RefType.PAM_DATABASE, RefType.PAM_DIRECTORY, RefType.PAM_BROWSER] + if v_type not in allowed_ref_types: + # default to machine + v_type = RefType.PAM_MACHINE + + resource_vertex = self.linking_dag.get_vertex(resource_uid) + if resource_vertex is None: + resource_vertex = self.linking_dag.add_vertex(uid=resource_uid, vertex_type=v_type) + + if resource_vertex.vertex_type not in allowed_ref_types: + resource_vertex.vertex_type = v_type + if is_config: + resource_vertex.vertex_type = RefType.PAM_NETWORK + dirty = False + content = get_vertex_content(resource_vertex) + if content is None: + content = {allowed_settings_name: {}} + dirty = True + if allowed_settings_name not in content: + content[allowed_settings_name] = {} + dirty = True + + settings = content[allowed_settings_name] + + # When no value in allowedSettings: client will substitute with default + # rotation defaults to True, everything else defaults to False + + # switching to 3-state on/off/default: on/true, off/false, + # None = Keep existing, 'default' = Reset to default (remove from dict) + if connections is not None: + connections = self._convert_allowed_setting(connections) + if connections != settings.get("connections", None): + dirty = True + if connections is None: + settings.pop("connections", None) + else: + settings["connections"] = connections + + if tunneling is not None: + tunneling = self._convert_allowed_setting(tunneling) + if tunneling != settings.get("portForwards", None): + dirty = True + if tunneling is None: + settings.pop("portForwards", None) + else: + settings["portForwards"] = tunneling + + if rotation is not None: + rotation = self._convert_allowed_setting(rotation) + if rotation != settings.get("rotation", None): + dirty = True + if rotation is None: + settings.pop("rotation", None) + else: + settings["rotation"] = rotation + + if session_recording is not None: + session_recording = self._convert_allowed_setting(session_recording) + if session_recording != settings.get("sessionRecording", None): + dirty = True + if 
session_recording is None: + settings.pop("sessionRecording", None) + else: + settings["sessionRecording"] = session_recording + + if typescript_recording is not None: + typescript_recording = self._convert_allowed_setting(typescript_recording) + if typescript_recording != settings.get("typescriptRecording", None): + dirty = True + if typescript_recording is None: + settings.pop("typescriptRecording", None) + else: + settings["typescriptRecording"] = typescript_recording + + if remote_browser_isolation is not None: + remote_browser_isolation = self._convert_allowed_setting(remote_browser_isolation) + if remote_browser_isolation != settings.get("remoteBrowserIsolation", None): + dirty = True + if remote_browser_isolation is None: + settings.pop("remoteBrowserIsolation", None) + else: + settings["remoteBrowserIsolation"] = remote_browser_isolation + + if dirty: + resource_vertex.add_data(content=content, path='meta', needs_encryption=False) + self.linking_dag.save() + + def is_tunneling_config_set_up(self, resource_uid): + if not self.linking_dag.has_graph: + return False + resource_vertex = self.linking_dag.get_vertex(resource_uid) + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + return resource_vertex and config_vertex and config_vertex in resource_vertex.belongs_to_vertices() + + def remove_from_dag(self, uid): + if not self.linking_dag.has_graph: + return True + + vertex = self.linking_dag.get_vertex(uid) + if vertex is None: + return True + + vertex.delete() + self.linking_dag.save(confirm=True) + return None + + def print_tunneling_config(self, record_uid, pam_settings=None, config_uid=None): + if not pam_settings and not config_uid: + return + self.linking_dag.load() + vertex = self.linking_dag.get_vertex(record_uid) + content = get_vertex_content(vertex) + config_id = config_uid if config_uid else pam_settings.value[0].get('configUid') if pam_settings else None + if content and content.get('allowedSettings'): + allowed_settings = content['allowedSettings'] + logger.info(f"Settings configured for {record_uid}") + port_forwarding = f"Enabled" if allowed_settings.get('portForwards') else \ + "Disabled" + rotation = "Disabled" if (allowed_settings.get('rotation') and not allowed_settings['rotation']) else "Enabled" + logger.info(f"\tRotation: {rotation}") + logger.info(f"\tTunneling: {port_forwarding}") + + logger.info(f"Configuration: {config_id}") + if config_id is not None: + config_vertex = self.linking_dag.get_vertex(self.record.record_uid) + config_content = get_vertex_content(config_vertex) + if config_content and config_content.get('allowedSettings'): + config_allowed_settings = config_content['allowedSettings'] + config_port_forwarding = "Enabled" if ( + config_allowed_settings.get('portForwards')) else \ + "Disabled" + config_rotation = "Disabled" if (config_allowed_settings.get('rotation') and + not config_allowed_settings['rotation']) else \ + "Enabled" + logger.info(f"\tRotation: {config_rotation}") + logger.info(f"\tTunneling: {config_port_forwarding}") diff --git a/keepersdk-package/src/keepersdk/helpers/tunnel/tunnel_utils.py b/keepersdk-package/src/keepersdk/helpers/tunnel/tunnel_utils.py new file mode 100644 index 00000000..d774a5de --- /dev/null +++ b/keepersdk-package/src/keepersdk/helpers/tunnel/tunnel_utils.py @@ -0,0 +1,83 @@ +import json +import os +import logging +import requests + +from typing import Any, Dict, List, Optional + +from ... 
import errors, utils, crypto +from ...vault import vault_online +from ..keeper_dag.dag_crypto import generate_random_bytes +from ...authentication import endpoint + + +logger = logging.getLogger(__name__) + + +VERIFY_SSL = bool(os.environ.get("VERIFY_SSL", "TRUE") == "TRUE") + + +def get_config_uid(vault: vault_online.VaultOnline, encrypted_session_token: bytes, encrypted_transmission_key: bytes, record_uid: str) -> Optional[str]: + try: + rs = get_dag_leafs(vault, encrypted_session_token, encrypted_transmission_key, record_uid) + if not rs: + return None + else: + return rs[0].get('value', '') + except Exception as e: + logger.error(f"Error getting configuration: {e}") + return None + + +def get_dag_leafs(vault: vault_online.VaultOnline, encrypted_session_token: bytes, encrypted_transmission_key: bytes, record_id: str) -> Optional[List[Dict[str, Any]]]: + """ + POST a stringified JSON object to /api/dag/get_leafs on the KRouter + The object is: + { + vertex: string, + graphId: number + } + """ + krouter_host = f"https://{vault.keeper_auth.keeper_endpoint.get_router_server()}" + path = '/api/user/get_leafs' + + payload = { + 'vertex': record_id, + 'graphId': 0 + } + + try: + rs = requests.request('post', + krouter_host + path, + verify=VERIFY_SSL, + headers={ + 'TransmissionKey': utils.base64_url_encode(encrypted_transmission_key), + 'Authorization': f'KeeperUser {utils.base64_url_encode(encrypted_session_token)}' + }, + data=json.dumps(payload).encode('utf-8') + ) + except ConnectionError as e: + raise errors.KeeperApiError(-1, f"KRouter is not reachable on '{krouter_host}'. Error: ${e}") + except Exception as ex: + raise ex + + if rs.status_code == 200: + logger.debug("Found right host") + return rs.json() + else: + logger.warning("Looks like there is no such controller connected to the router.") + return None + + +def get_keeper_tokens(vault: vault_online.VaultOnline): + transmission_key = generate_random_bytes(32) + server_public_key = endpoint.SERVER_PUBLIC_KEYS[vault.keeper_auth.keeper_endpoint.server_key_id] + + if vault.keeper_auth.keeper_endpoint.server_key_id < 7: + encrypted_transmission_key = crypto.encrypt_rsa(transmission_key, server_public_key) + else: + encrypted_transmission_key = crypto.encrypt_ec(transmission_key, server_public_key) + encrypted_session_token = crypto.encrypt_aes_v2( + utils.base64_url_decode(vault.keeper_auth.auth_context.session_token), transmission_key) + + return encrypted_session_token, encrypted_transmission_key, transmission_key diff --git a/keepersdk-package/src/keepersdk/plugins/pedm/admin_plugin.py b/keepersdk-package/src/keepersdk/plugins/pedm/admin_plugin.py index 113f6606..0ccbe8f6 100644 --- a/keepersdk-package/src/keepersdk/plugins/pedm/admin_plugin.py +++ b/keepersdk-package/src/keepersdk/plugins/pedm/admin_plugin.py @@ -158,6 +158,7 @@ def __init__(self, loader: enterprise_loader.EnterpriseLoader): self._push_notifications.register_callback(self.on_push_message) self._push_notifications.connect_to_push_channel() self._need_sync = True + self._last_seen: Optional[Dict[str, int]] = None self.logger = utils.get_logger() def on_push_message(self, message: Dict[str, Any]): @@ -813,3 +814,42 @@ def modify_approvals(self, *, self._need_sync = True assert status_rs is not None return admin_types.ModifyStatus.from_proto(status_rs) + + def load_last_seen(self) -> None: + if self._last_seen is None: + self._last_seen = {} + else: + self._last_seen.clear() + auth = self.loader.keeper_auth + rq = pedm_pb2.GetAgentLastSeenRequest() + rq.activeOnly = 
True + last_seen_rs = auth.execute_router( + 'pedm/get_agent_last_seen', rq, response_type=pedm_pb2.GetAgentLastSeenResponse) + assert last_seen_rs is not None + for ls in last_seen_rs.lastSeen: + self._last_seen[utils.base64_url_encode(ls.agentUid)] = ls.lastSeen + + def agent_last_seen(self, agent_uid: str) -> Optional[datetime.datetime]: + if self._last_seen is None: + self.load_last_seen() + + if agent_uid in self._last_seen: + millis = self._last_seen[agent_uid] + return datetime.datetime.fromtimestamp(millis / 1000) + + return None + def extend_approvals(self, *, to_extend: Optional[List[admin_types.PedmUpdateApproval]] = None) -> admin_types.ModifyStatus: + auth = self.loader.keeper_auth + + rq = pedm_pb2.ModifyApprovalRequest() + if isinstance(to_extend, list): + for update in to_extend: + au = pedm_pb2.ApprovalExtendData() + au.approvalUid = utils.base64_url_decode(update.approval_uid) + au.expireIn = update.expire_in + rq.extendApproval.append(au) + + status_rs = auth.execute_router('pedm/modify_approval', rq, response_type=pedm_pb2.PedmStatusResponse) + self._need_sync = True + assert status_rs is not None + return admin_types.ModifyStatus.from_proto(status_rs) diff --git a/keepersdk-package/src/keepersdk/plugins/pedm/admin_types.py b/keepersdk-package/src/keepersdk/plugins/pedm/admin_types.py index 98e725e0..22b2b499 100644 --- a/keepersdk-package/src/keepersdk/plugins/pedm/admin_types.py +++ b/keepersdk-package/src/keepersdk/plugins/pedm/admin_types.py @@ -90,6 +90,12 @@ def uid(self) -> str: return self.approval_uid +@attrs.define(kw_only=True, frozen=True) +class PedmUpdateApproval: + approval_uid: str + expire_in: int + + @attrs.define(kw_only=True) class AddDeployment: name: str diff --git a/keepersdk-package/src/keepersdk/proto/pedm_pb2.py b/keepersdk-package/src/keepersdk/proto/pedm_pb2.py index 840a83a1..324a4b0c 100644 --- a/keepersdk-package/src/keepersdk/proto/pedm_pb2.py +++ b/keepersdk-package/src/keepersdk/proto/pedm_pb2.py @@ -26,7 +26,7 @@ from . 
import NotificationCenter_pb2 as NotificationCenter__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\npedm.proto\x12\x04PEDM\x1a\x0c\x66older.proto\x1a\x18NotificationCenter.proto\"O\n\x17PEDMTOTPValidateRequest\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x02 \x01(\x05\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05\";\n\nPedmStatus\x12\x0b\n\x03key\x18\x01 \x03(\x0c\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0f\n\x07message\x18\x03 \x01(\t\"\x89\x01\n\x12PedmStatusResponse\x12#\n\taddStatus\x18\x01 \x03(\x0b\x32\x10.PEDM.PedmStatus\x12&\n\x0cupdateStatus\x18\x02 \x03(\x0b\x32\x10.PEDM.PedmStatus\x12&\n\x0cremoveStatus\x18\x03 \x03(\x0b\x32\x10.PEDM.PedmStatus\"4\n\x0e\x44\x65ploymentData\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x65\x63PrivateKey\x18\x02 \x01(\x0c\"\x9a\x01\n\x17\x44\x65ploymentCreateRequest\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x0e\n\x06\x61\x65sKey\x18\x02 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x03 \x01(\x0c\x12\x19\n\x11spiffeCertificate\x18\x04 \x01(\x0c\x12\x15\n\rencryptedData\x18\x05 \x01(\x0c\x12\x11\n\tagentData\x18\x06 \x01(\x0c\"\x8d\x01\n\x17\x44\x65ploymentUpdateRequest\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x15\n\rencryptedData\x18\x02 \x01(\x0c\x12)\n\x08\x64isabled\x18\x03 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x19\n\x11spiffeCertificate\x18\x04 \x01(\x0c\"\xa2\x01\n\x17ModifyDeploymentRequest\x12\x34\n\raddDeployment\x18\x01 \x03(\x0b\x32\x1d.PEDM.DeploymentCreateRequest\x12\x37\n\x10updateDeployment\x18\x02 \x03(\x0b\x32\x1d.PEDM.DeploymentUpdateRequest\x12\x18\n\x10removeDeployment\x18\x03 \x03(\x0c\"a\n\x0b\x41gentUpdate\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12)\n\x08\x64isabled\x18\x02 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x15\n\rdeploymentUid\x18\x03 \x01(\x0c\"Q\n\x12ModifyAgentRequest\x12&\n\x0bupdateAgent\x18\x02 \x03(\x0b\x32\x11.PEDM.AgentUpdate\x12\x13\n\x0bremoveAgent\x18\x03 \x03(\x0c\"p\n\tPolicyAdd\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x14\n\x0c\x65ncryptedKey\x18\x04 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x05 \x01(\x08\"v\n\x0cPolicyUpdate\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12)\n\x08\x64isabled\x18\x04 \x01(\x0e\x32\x17.Folder.SetBooleanValue\"s\n\rPolicyRequest\x12\"\n\taddPolicy\x18\x01 \x03(\x0b\x32\x0f.PEDM.PolicyAdd\x12(\n\x0cupdatePolicy\x18\x02 \x03(\x0b\x32\x12.PEDM.PolicyUpdate\x12\x14\n\x0cremovePolicy\x18\x03 \x03(\x0c\"6\n\nPolicyLink\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x15\n\rcollectionUid\x18\x02 \x03(\x0c\"E\n\x1aSetPolicyCollectionRequest\x12\'\n\rsetCollection\x18\x01 \x03(\x0b\x32\x10.PEDM.PolicyLink\"W\n\x0f\x43ollectionValue\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x16\n\x0e\x63ollectionType\x18\x02 \x01(\x05\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\"z\n\x12\x43ollectionLinkData\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x0f\n\x07linkUid\x18\x02 \x01(\x0c\x12*\n\x08linkType\x18\x03 \x01(\x0e\x32\x18.PEDM.CollectionLinkType\x12\x10\n\x08linkData\x18\x04 \x01(\x0c\"\x8c\x01\n\x11\x43ollectionRequest\x12,\n\raddCollection\x18\x01 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12/\n\x10updateCollection\x18\x02 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12\x18\n\x10removeCollection\x18\x03 \x03(\x0c\"{\n\x18SetCollectionLinkRequest\x12/\n\raddCollection\x18\x01 \x03(\x0b\x32\x18.PEDM.CollectionLinkData\x12.\n\x10removeCollection\x18\x02 
\x03(\x0b\x32\x14.PEDM.CollectionLink\"F\n\x15\x41pprovalActionRequest\x12\x0f\n\x07\x61pprove\x18\x01 \x03(\x0c\x12\x0c\n\x04\x64\x65ny\x18\x02 \x03(\x0c\x12\x0e\n\x06remove\x18\x03 \x03(\x0c\"\xab\x01\n\x0e\x44\x65ploymentNode\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x02 \x01(\x08\x12\x0e\n\x06\x61\x65sKey\x18\x03 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x04 \x01(\x0c\x12\x15\n\rencryptedData\x18\x05 \x01(\x0c\x12\x11\n\tagentData\x18\x06 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x03\x12\x10\n\x08modified\x18\x08 \x01(\x03\"\xa8\x01\n\tAgentNode\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12\x11\n\tmachineId\x18\x02 \x01(\t\x12\x15\n\rdeploymentUid\x18\x03 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x04 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x05 \x01(\x08\x12\x15\n\rencryptedData\x18\x06 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x03\x12\x10\n\x08modified\x18\x08 \x01(\x03\"\x94\x01\n\nPolicyNode\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x14\n\x0c\x65ncryptedKey\x18\x04 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x05 \x01(\x03\x12\x10\n\x08modified\x18\x06 \x01(\x03\x12\x10\n\x08\x64isabled\x18\x07 \x01(\x08\"g\n\x0e\x43ollectionNode\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x16\n\x0e\x63ollectionType\x18\x02 \x01(\x05\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\"d\n\x0e\x43ollectionLink\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x0f\n\x07linkUid\x18\x02 \x01(\x0c\x12*\n\x08linkType\x18\x03 \x01(\x0e\x32\x18.PEDM.CollectionLinkType\"\x9d\x01\n\x12\x41pprovalStatusNode\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x46\n\x0e\x61pprovalStatus\x18\x02 \x01(\x0e\x32..NotificationCenter.NotificationApprovalStatus\x12\x18\n\x10\x65nterpriseUserId\x18\x03 \x01(\x03\x12\x10\n\x08modified\x18\n \x01(\x03\"\xb3\x01\n\x0c\x41pprovalNode\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x14\n\x0c\x61pprovalType\x18\x02 \x01(\x05\x12\x10\n\x08\x61gentUid\x18\x03 \x01(\x0c\x12\x13\n\x0b\x61\x63\x63ountInfo\x18\x04 \x01(\x0c\x12\x17\n\x0f\x61pplicationInfo\x18\x05 \x01(\x0c\x12\x15\n\rjustification\x18\x06 \x01(\x0c\x12\x10\n\x08\x65xpireIn\x18\x07 \x01(\x05\x12\x0f\n\x07\x63reated\x18\n \x01(\x03\"C\n\rFullSyncToken\x12\x15\n\rstartRevision\x18\x01 \x01(\x03\x12\x0e\n\x06\x65ntity\x18\x02 \x01(\x05\x12\x0b\n\x03key\x18\x03 \x03(\x0c\"$\n\x0cIncSyncToken\x12\x14\n\x0clastRevision\x18\x02 \x01(\x03\"h\n\rPedmSyncToken\x12\'\n\x08\x66ullSync\x18\x02 \x01(\x0b\x32\x13.PEDM.FullSyncTokenH\x00\x12%\n\x07incSync\x18\x03 \x01(\x0b\x32\x12.PEDM.IncSyncTokenH\x00\x42\x07\n\x05token\"/\n\x12GetPedmDataRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\"\xad\x04\n\x13GetPedmDataResponse\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x12\n\nresetCache\x18\x02 \x01(\x08\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\x12\x1a\n\x12removedDeployments\x18\n \x03(\x0c\x12\x15\n\rremovedAgents\x18\x0b \x03(\x0c\x12\x17\n\x0fremovedPolicies\x18\x0c \x03(\x0c\x12\x19\n\x11removedCollection\x18\r \x03(\x0c\x12\x33\n\x15removedCollectionLink\x18\x0e \x03(\x0b\x32\x14.PEDM.CollectionLink\x12\x18\n\x10removedApprovals\x18\x0f \x03(\x0c\x12)\n\x0b\x64\x65ployments\x18\x14 \x03(\x0b\x32\x14.PEDM.DeploymentNode\x12\x1f\n\x06\x61gents\x18\x15 \x03(\x0b\x32\x0f.PEDM.AgentNode\x12\"\n\x08policies\x18\x16 \x03(\x0b\x32\x10.PEDM.PolicyNode\x12)\n\x0b\x63ollections\x18\x17 \x03(\x0b\x32\x14.PEDM.CollectionNode\x12,\n\x0e\x63ollectionLink\x18\x18 
\x03(\x0b\x32\x14.PEDM.CollectionLink\x12%\n\tapprovals\x18\x19 \x03(\x0b\x32\x12.PEDM.ApprovalNode\x12\x30\n\x0e\x61pprovalStatus\x18\x1a \x03(\x0b\x32\x18.PEDM.ApprovalStatusNode\"<\n\x12PolicyAgentRequest\x12\x11\n\tpolicyUid\x18\x01 \x03(\x0c\x12\x13\n\x0bsummaryOnly\x18\x02 \x01(\x08\";\n\x13PolicyAgentResponse\x12\x12\n\nagentCount\x18\x01 \x01(\x05\x12\x10\n\x08\x61gentUid\x18\x02 \x03(\x0c\"]\n\x16\x41uditCollectionRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x10\n\x08valueUid\x18\x02 \x03(\x0c\x12\x16\n\x0e\x63ollectionName\x18\x03 \x03(\t\"h\n\x14\x41uditCollectionValue\x12\x16\n\x0e\x63ollectionName\x18\x01 \x01(\t\x12\x10\n\x08valueUid\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\"q\n\x17\x41uditCollectionResponse\x12*\n\x06values\x18\x01 \x03(\x0b\x32\x1a.PEDM.AuditCollectionValue\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12\x19\n\x11\x63ontinuationToken\x18\x03 \x01(\x0c\"H\n\x18GetCollectionLinkRequest\x12,\n\x0e\x63ollectionLink\x18\x01 \x03(\x0b\x32\x14.PEDM.CollectionLink\"Q\n\x19GetCollectionLinkResponse\x12\x34\n\x12\x63ollectionLinkData\x18\x01 \x03(\x0b\x32\x18.PEDM.CollectionLinkData\"2\n\x1aGetActiveAgentCountRequest\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x03(\x05\">\n\x10\x41\x63tiveAgentCount\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x14\n\x0c\x61\x63tiveAgents\x18\x02 \x01(\x05\";\n\x12\x41\x63tiveAgentFailure\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"x\n\x1bGetActiveAgentCountResponse\x12*\n\nagentCount\x18\x01 \x03(\x0b\x32\x16.PEDM.ActiveAgentCount\x12-\n\x0b\x66\x61iledCount\x18\x02 \x03(\x0b\x32\x18.PEDM.ActiveAgentFailure\"\x87\x01\n\x19GetAgentDailyCountRequest\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x03(\x05\x12$\n\tmonthYear\x18\x02 \x01(\x0b\x32\x0f.PEDM.MonthYearH\x00\x12$\n\tdateRange\x18\x03 \x01(\x0b\x32\x0f.PEDM.DateRangeH\x00\x42\x08\n\x06period\"(\n\tMonthYear\x12\r\n\x05month\x18\x01 \x01(\x05\x12\x0c\n\x04year\x18\x02 \x01(\x05\"\'\n\tDateRange\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x03\"3\n\x0f\x41gentDailyCount\x12\x0c\n\x04\x64\x61te\x18\x01 \x01(\x03\x12\x12\n\nagentCount\x18\x02 \x01(\x05\"V\n\x17\x41gentCountForEnterprise\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12%\n\x06\x63ounts\x18\x02 \x03(\x0b\x32\x15.PEDM.AgentDailyCount\"U\n\x1aGetAgentDailyCountResponse\x12\x37\n\x10\x65nterpriseCounts\x18\x01 \x03(\x0b\x32\x1d.PEDM.AgentCountForEnterprise*j\n\x12\x43ollectionLinkType\x12\r\n\tCLT_OTHER\x10\x00\x12\r\n\tCLT_AGENT\x10\x01\x12\x0e\n\nCLT_POLICY\x10\x02\x12\x12\n\x0e\x43LT_COLLECTION\x10\x03\x12\x12\n\x0e\x43LT_DEPLOYMENT\x10\x04\x42 \n\x18\x63om.keepersecurity.protoB\x04PEDMb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\npedm.proto\x12\x04PEDM\x1a\x0c\x66older.proto\x1a\x18NotificationCenter.proto\"O\n\x17PEDMTOTPValidateRequest\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x14\n\x0c\x65nterpriseId\x18\x02 \x01(\x05\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05\";\n\nPedmStatus\x12\x0b\n\x03key\x18\x01 \x03(\x0c\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x0f\n\x07message\x18\x03 \x01(\t\"\x89\x01\n\x12PedmStatusResponse\x12#\n\taddStatus\x18\x01 \x03(\x0b\x32\x10.PEDM.PedmStatus\x12&\n\x0cupdateStatus\x18\x02 \x03(\x0b\x32\x10.PEDM.PedmStatus\x12&\n\x0cremoveStatus\x18\x03 \x03(\x0b\x32\x10.PEDM.PedmStatus\"4\n\x0e\x44\x65ploymentData\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x65\x63PrivateKey\x18\x02 
\x01(\x0c\"\x9a\x01\n\x17\x44\x65ploymentCreateRequest\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x0e\n\x06\x61\x65sKey\x18\x02 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x03 \x01(\x0c\x12\x19\n\x11spiffeCertificate\x18\x04 \x01(\x0c\x12\x15\n\rencryptedData\x18\x05 \x01(\x0c\x12\x11\n\tagentData\x18\x06 \x01(\x0c\"\x8d\x01\n\x17\x44\x65ploymentUpdateRequest\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x15\n\rencryptedData\x18\x02 \x01(\x0c\x12)\n\x08\x64isabled\x18\x03 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x19\n\x11spiffeCertificate\x18\x04 \x01(\x0c\"\xa2\x01\n\x17ModifyDeploymentRequest\x12\x34\n\raddDeployment\x18\x01 \x03(\x0b\x32\x1d.PEDM.DeploymentCreateRequest\x12\x37\n\x10updateDeployment\x18\x02 \x03(\x0b\x32\x1d.PEDM.DeploymentUpdateRequest\x12\x18\n\x10removeDeployment\x18\x03 \x03(\x0c\"a\n\x0b\x41gentUpdate\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12)\n\x08\x64isabled\x18\x02 \x01(\x0e\x32\x17.Folder.SetBooleanValue\x12\x15\n\rdeploymentUid\x18\x03 \x01(\x0c\"Q\n\x12ModifyAgentRequest\x12&\n\x0bupdateAgent\x18\x02 \x03(\x0b\x32\x11.PEDM.AgentUpdate\x12\x13\n\x0bremoveAgent\x18\x03 \x03(\x0c\"p\n\tPolicyAdd\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x14\n\x0c\x65ncryptedKey\x18\x04 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x05 \x01(\x08\"v\n\x0cPolicyUpdate\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12)\n\x08\x64isabled\x18\x04 \x01(\x0e\x32\x17.Folder.SetBooleanValue\"s\n\rPolicyRequest\x12\"\n\taddPolicy\x18\x01 \x03(\x0b\x32\x0f.PEDM.PolicyAdd\x12(\n\x0cupdatePolicy\x18\x02 \x03(\x0b\x32\x12.PEDM.PolicyUpdate\x12\x14\n\x0cremovePolicy\x18\x03 \x03(\x0c\"6\n\nPolicyLink\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x15\n\rcollectionUid\x18\x02 \x03(\x0c\"E\n\x1aSetPolicyCollectionRequest\x12\'\n\rsetCollection\x18\x01 \x03(\x0b\x32\x10.PEDM.PolicyLink\"W\n\x0f\x43ollectionValue\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x16\n\x0e\x63ollectionType\x18\x02 \x01(\x05\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\"z\n\x12\x43ollectionLinkData\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x0f\n\x07linkUid\x18\x02 \x01(\x0c\x12*\n\x08linkType\x18\x03 \x01(\x0e\x32\x18.PEDM.CollectionLinkType\x12\x10\n\x08linkData\x18\x04 \x01(\x0c\"\x8c\x01\n\x11\x43ollectionRequest\x12,\n\raddCollection\x18\x01 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12/\n\x10updateCollection\x18\x02 \x03(\x0b\x32\x15.PEDM.CollectionValue\x12\x18\n\x10removeCollection\x18\x03 \x03(\x0c\"{\n\x18SetCollectionLinkRequest\x12/\n\raddCollection\x18\x01 \x03(\x0b\x32\x18.PEDM.CollectionLinkData\x12.\n\x10removeCollection\x18\x02 \x03(\x0b\x32\x14.PEDM.CollectionLink\";\n\x12\x41pprovalExtendData\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x65xpireIn\x18\x02 \x01(\x05\"I\n\x15ModifyApprovalRequest\x12\x30\n\x0e\x65xtendApproval\x18\x01 \x03(\x0b\x32\x18.PEDM.ApprovalExtendData\"F\n\x15\x41pprovalActionRequest\x12\x0f\n\x07\x61pprove\x18\x01 \x03(\x0c\x12\x0c\n\x04\x64\x65ny\x18\x02 \x03(\x0c\x12\x0e\n\x06remove\x18\x03 \x03(\x0c\"\xab\x01\n\x0e\x44\x65ploymentNode\x12\x15\n\rdeploymentUid\x18\x01 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x02 \x01(\x08\x12\x0e\n\x06\x61\x65sKey\x18\x03 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x04 \x01(\x0c\x12\x15\n\rencryptedData\x18\x05 \x01(\x0c\x12\x11\n\tagentData\x18\x06 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x03\x12\x10\n\x08modified\x18\x08 
\x01(\x03\"\xa8\x01\n\tAgentNode\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12\x11\n\tmachineId\x18\x02 \x01(\t\x12\x15\n\rdeploymentUid\x18\x03 \x01(\x0c\x12\x13\n\x0b\x65\x63PublicKey\x18\x04 \x01(\x0c\x12\x10\n\x08\x64isabled\x18\x05 \x01(\x08\x12\x15\n\rencryptedData\x18\x06 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x03\x12\x10\n\x08modified\x18\x08 \x01(\x03\"\x94\x01\n\nPolicyNode\x12\x11\n\tpolicyUid\x18\x01 \x01(\x0c\x12\x11\n\tplainData\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x14\n\x0c\x65ncryptedKey\x18\x04 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x05 \x01(\x03\x12\x10\n\x08modified\x18\x06 \x01(\x03\x12\x10\n\x08\x64isabled\x18\x07 \x01(\x08\"g\n\x0e\x43ollectionNode\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x16\n\x0e\x63ollectionType\x18\x02 \x01(\x05\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\"d\n\x0e\x43ollectionLink\x12\x15\n\rcollectionUid\x18\x01 \x01(\x0c\x12\x0f\n\x07linkUid\x18\x02 \x01(\x0c\x12*\n\x08linkType\x18\x03 \x01(\x0e\x32\x18.PEDM.CollectionLinkType\"\x9d\x01\n\x12\x41pprovalStatusNode\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x46\n\x0e\x61pprovalStatus\x18\x02 \x01(\x0e\x32..NotificationCenter.NotificationApprovalStatus\x12\x18\n\x10\x65nterpriseUserId\x18\x03 \x01(\x03\x12\x10\n\x08modified\x18\n \x01(\x03\"\xb3\x01\n\x0c\x41pprovalNode\x12\x13\n\x0b\x61pprovalUid\x18\x01 \x01(\x0c\x12\x14\n\x0c\x61pprovalType\x18\x02 \x01(\x05\x12\x10\n\x08\x61gentUid\x18\x03 \x01(\x0c\x12\x13\n\x0b\x61\x63\x63ountInfo\x18\x04 \x01(\x0c\x12\x17\n\x0f\x61pplicationInfo\x18\x05 \x01(\x0c\x12\x15\n\rjustification\x18\x06 \x01(\x0c\x12\x10\n\x08\x65xpireIn\x18\x07 \x01(\x05\x12\x0f\n\x07\x63reated\x18\n \x01(\x03\"C\n\rFullSyncToken\x12\x15\n\rstartRevision\x18\x01 \x01(\x03\x12\x0e\n\x06\x65ntity\x18\x02 \x01(\x05\x12\x0b\n\x03key\x18\x03 \x03(\x0c\"$\n\x0cIncSyncToken\x12\x14\n\x0clastRevision\x18\x02 \x01(\x03\"h\n\rPedmSyncToken\x12\'\n\x08\x66ullSync\x18\x02 \x01(\x0b\x32\x13.PEDM.FullSyncTokenH\x00\x12%\n\x07incSync\x18\x03 \x01(\x0b\x32\x12.PEDM.IncSyncTokenH\x00\x42\x07\n\x05token\"/\n\x12GetPedmDataRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\"\xad\x04\n\x13GetPedmDataResponse\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x12\n\nresetCache\x18\x02 \x01(\x08\x12\x0f\n\x07hasMore\x18\x03 \x01(\x08\x12\x1a\n\x12removedDeployments\x18\n \x03(\x0c\x12\x15\n\rremovedAgents\x18\x0b \x03(\x0c\x12\x17\n\x0fremovedPolicies\x18\x0c \x03(\x0c\x12\x19\n\x11removedCollection\x18\r \x03(\x0c\x12\x33\n\x15removedCollectionLink\x18\x0e \x03(\x0b\x32\x14.PEDM.CollectionLink\x12\x18\n\x10removedApprovals\x18\x0f \x03(\x0c\x12)\n\x0b\x64\x65ployments\x18\x14 \x03(\x0b\x32\x14.PEDM.DeploymentNode\x12\x1f\n\x06\x61gents\x18\x15 \x03(\x0b\x32\x0f.PEDM.AgentNode\x12\"\n\x08policies\x18\x16 \x03(\x0b\x32\x10.PEDM.PolicyNode\x12)\n\x0b\x63ollections\x18\x17 \x03(\x0b\x32\x14.PEDM.CollectionNode\x12,\n\x0e\x63ollectionLink\x18\x18 \x03(\x0b\x32\x14.PEDM.CollectionLink\x12%\n\tapprovals\x18\x19 \x03(\x0b\x32\x12.PEDM.ApprovalNode\x12\x30\n\x0e\x61pprovalStatus\x18\x1a \x03(\x0b\x32\x18.PEDM.ApprovalStatusNode\"<\n\x12PolicyAgentRequest\x12\x11\n\tpolicyUid\x18\x01 \x03(\x0c\x12\x13\n\x0bsummaryOnly\x18\x02 \x01(\x08\";\n\x13PolicyAgentResponse\x12\x12\n\nagentCount\x18\x01 \x01(\x05\x12\x10\n\x08\x61gentUid\x18\x02 \x03(\x0c\"]\n\x16\x41uditCollectionRequest\x12\x19\n\x11\x63ontinuationToken\x18\x01 \x01(\x0c\x12\x10\n\x08valueUid\x18\x02 \x03(\x0c\x12\x16\n\x0e\x63ollectionName\x18\x03 
\x03(\t\"h\n\x14\x41uditCollectionValue\x12\x16\n\x0e\x63ollectionName\x18\x01 \x01(\t\x12\x10\n\x08valueUid\x18\x02 \x01(\x0c\x12\x15\n\rencryptedData\x18\x03 \x01(\x0c\x12\x0f\n\x07\x63reated\x18\x04 \x01(\x03\"q\n\x17\x41uditCollectionResponse\x12*\n\x06values\x18\x01 \x03(\x0b\x32\x1a.PEDM.AuditCollectionValue\x12\x0f\n\x07hasMore\x18\x02 \x01(\x08\x12\x19\n\x11\x63ontinuationToken\x18\x03 \x01(\x0c\"H\n\x18GetCollectionLinkRequest\x12,\n\x0e\x63ollectionLink\x18\x01 \x03(\x0b\x32\x14.PEDM.CollectionLink\"Q\n\x19GetCollectionLinkResponse\x12\x34\n\x12\x63ollectionLinkData\x18\x01 \x03(\x0b\x32\x18.PEDM.CollectionLinkData\"?\n\x17GetAgentLastSeenRequest\x12\x12\n\nactiveOnly\x18\x01 \x01(\x08\x12\x10\n\x08\x61gentUid\x18\x02 \x03(\x0c\"3\n\rAgentLastSeen\x12\x10\n\x08\x61gentUid\x18\x01 \x01(\x0c\x12\x10\n\x08lastSeen\x18\x02 \x01(\x03\"A\n\x18GetAgentLastSeenResponse\x12%\n\x08lastSeen\x18\x01 \x03(\x0b\x32\x13.PEDM.AgentLastSeen\"2\n\x1aGetActiveAgentCountRequest\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x03(\x05\">\n\x10\x41\x63tiveAgentCount\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x14\n\x0c\x61\x63tiveAgents\x18\x02 \x01(\x05\";\n\x12\x41\x63tiveAgentFailure\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"x\n\x1bGetActiveAgentCountResponse\x12*\n\nagentCount\x18\x01 \x03(\x0b\x32\x16.PEDM.ActiveAgentCount\x12-\n\x0b\x66\x61iledCount\x18\x02 \x03(\x0b\x32\x18.PEDM.ActiveAgentFailure\"\x87\x01\n\x19GetAgentDailyCountRequest\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x03(\x05\x12$\n\tmonthYear\x18\x02 \x01(\x0b\x32\x0f.PEDM.MonthYearH\x00\x12$\n\tdateRange\x18\x03 \x01(\x0b\x32\x0f.PEDM.DateRangeH\x00\x42\x08\n\x06period\"(\n\tMonthYear\x12\r\n\x05month\x18\x01 \x01(\x05\x12\x0c\n\x04year\x18\x02 \x01(\x05\"\'\n\tDateRange\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x03\"3\n\x0f\x41gentDailyCount\x12\x0c\n\x04\x64\x61te\x18\x01 \x01(\x03\x12\x12\n\nagentCount\x18\x02 \x01(\x05\"V\n\x17\x41gentCountForEnterprise\x12\x14\n\x0c\x65nterpriseId\x18\x01 \x01(\x05\x12%\n\x06\x63ounts\x18\x02 \x03(\x0b\x32\x15.PEDM.AgentDailyCount\"U\n\x1aGetAgentDailyCountResponse\x12\x37\n\x10\x65nterpriseCounts\x18\x01 \x03(\x0b\x32\x1d.PEDM.AgentCountForEnterprise*j\n\x12\x43ollectionLinkType\x12\r\n\tCLT_OTHER\x10\x00\x12\r\n\tCLT_AGENT\x10\x01\x12\x0e\n\nCLT_POLICY\x10\x02\x12\x12\n\x0e\x43LT_COLLECTION\x10\x03\x12\x12\n\x0e\x43LT_DEPLOYMENT\x10\x04\x42 \n\x18\x63om.keepersecurity.protoB\x04PEDMb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -34,8 +34,8 @@ if not _descriptor._USE_C_DESCRIPTORS: _globals['DESCRIPTOR']._loaded_options = None _globals['DESCRIPTOR']._serialized_options = b'\n\030com.keepersecurity.protoB\004PEDM' - _globals['_COLLECTIONLINKTYPE']._serialized_start=5286 - _globals['_COLLECTIONLINKTYPE']._serialized_end=5392 + _globals['_COLLECTIONLINKTYPE']._serialized_start=5607 + _globals['_COLLECTIONLINKTYPE']._serialized_end=5713 _globals['_PEDMTOTPVALIDATEREQUEST']._serialized_start=60 _globals['_PEDMTOTPVALIDATEREQUEST']._serialized_end=139 _globals['_PEDMSTATUS']._serialized_start=141 @@ -72,64 +72,74 @@ _globals['_COLLECTIONREQUEST']._serialized_end=1876 _globals['_SETCOLLECTIONLINKREQUEST']._serialized_start=1878 _globals['_SETCOLLECTIONLINKREQUEST']._serialized_end=2001 - _globals['_APPROVALACTIONREQUEST']._serialized_start=2003 - _globals['_APPROVALACTIONREQUEST']._serialized_end=2073 - _globals['_DEPLOYMENTNODE']._serialized_start=2076 - 
_globals['_DEPLOYMENTNODE']._serialized_end=2247 - _globals['_AGENTNODE']._serialized_start=2250 - _globals['_AGENTNODE']._serialized_end=2418 - _globals['_POLICYNODE']._serialized_start=2421 - _globals['_POLICYNODE']._serialized_end=2569 - _globals['_COLLECTIONNODE']._serialized_start=2571 - _globals['_COLLECTIONNODE']._serialized_end=2674 - _globals['_COLLECTIONLINK']._serialized_start=2676 - _globals['_COLLECTIONLINK']._serialized_end=2776 - _globals['_APPROVALSTATUSNODE']._serialized_start=2779 - _globals['_APPROVALSTATUSNODE']._serialized_end=2936 - _globals['_APPROVALNODE']._serialized_start=2939 - _globals['_APPROVALNODE']._serialized_end=3118 - _globals['_FULLSYNCTOKEN']._serialized_start=3120 - _globals['_FULLSYNCTOKEN']._serialized_end=3187 - _globals['_INCSYNCTOKEN']._serialized_start=3189 - _globals['_INCSYNCTOKEN']._serialized_end=3225 - _globals['_PEDMSYNCTOKEN']._serialized_start=3227 - _globals['_PEDMSYNCTOKEN']._serialized_end=3331 - _globals['_GETPEDMDATAREQUEST']._serialized_start=3333 - _globals['_GETPEDMDATAREQUEST']._serialized_end=3380 - _globals['_GETPEDMDATARESPONSE']._serialized_start=3383 - _globals['_GETPEDMDATARESPONSE']._serialized_end=3940 - _globals['_POLICYAGENTREQUEST']._serialized_start=3942 - _globals['_POLICYAGENTREQUEST']._serialized_end=4002 - _globals['_POLICYAGENTRESPONSE']._serialized_start=4004 - _globals['_POLICYAGENTRESPONSE']._serialized_end=4063 - _globals['_AUDITCOLLECTIONREQUEST']._serialized_start=4065 - _globals['_AUDITCOLLECTIONREQUEST']._serialized_end=4158 - _globals['_AUDITCOLLECTIONVALUE']._serialized_start=4160 - _globals['_AUDITCOLLECTIONVALUE']._serialized_end=4264 - _globals['_AUDITCOLLECTIONRESPONSE']._serialized_start=4266 - _globals['_AUDITCOLLECTIONRESPONSE']._serialized_end=4379 - _globals['_GETCOLLECTIONLINKREQUEST']._serialized_start=4381 - _globals['_GETCOLLECTIONLINKREQUEST']._serialized_end=4453 - _globals['_GETCOLLECTIONLINKRESPONSE']._serialized_start=4455 - _globals['_GETCOLLECTIONLINKRESPONSE']._serialized_end=4536 - _globals['_GETACTIVEAGENTCOUNTREQUEST']._serialized_start=4538 - _globals['_GETACTIVEAGENTCOUNTREQUEST']._serialized_end=4588 - _globals['_ACTIVEAGENTCOUNT']._serialized_start=4590 - _globals['_ACTIVEAGENTCOUNT']._serialized_end=4652 - _globals['_ACTIVEAGENTFAILURE']._serialized_start=4654 - _globals['_ACTIVEAGENTFAILURE']._serialized_end=4713 - _globals['_GETACTIVEAGENTCOUNTRESPONSE']._serialized_start=4715 - _globals['_GETACTIVEAGENTCOUNTRESPONSE']._serialized_end=4835 - _globals['_GETAGENTDAILYCOUNTREQUEST']._serialized_start=4838 - _globals['_GETAGENTDAILYCOUNTREQUEST']._serialized_end=4973 - _globals['_MONTHYEAR']._serialized_start=4975 - _globals['_MONTHYEAR']._serialized_end=5015 - _globals['_DATERANGE']._serialized_start=5017 - _globals['_DATERANGE']._serialized_end=5056 - _globals['_AGENTDAILYCOUNT']._serialized_start=5058 - _globals['_AGENTDAILYCOUNT']._serialized_end=5109 - _globals['_AGENTCOUNTFORENTERPRISE']._serialized_start=5111 - _globals['_AGENTCOUNTFORENTERPRISE']._serialized_end=5197 - _globals['_GETAGENTDAILYCOUNTRESPONSE']._serialized_start=5199 - _globals['_GETAGENTDAILYCOUNTRESPONSE']._serialized_end=5284 + _globals['_APPROVALEXTENDDATA']._serialized_start=2003 + _globals['_APPROVALEXTENDDATA']._serialized_end=2062 + _globals['_MODIFYAPPROVALREQUEST']._serialized_start=2064 + _globals['_MODIFYAPPROVALREQUEST']._serialized_end=2137 + _globals['_APPROVALACTIONREQUEST']._serialized_start=2139 + _globals['_APPROVALACTIONREQUEST']._serialized_end=2209 + 
_globals['_DEPLOYMENTNODE']._serialized_start=2212 + _globals['_DEPLOYMENTNODE']._serialized_end=2383 + _globals['_AGENTNODE']._serialized_start=2386 + _globals['_AGENTNODE']._serialized_end=2554 + _globals['_POLICYNODE']._serialized_start=2557 + _globals['_POLICYNODE']._serialized_end=2705 + _globals['_COLLECTIONNODE']._serialized_start=2707 + _globals['_COLLECTIONNODE']._serialized_end=2810 + _globals['_COLLECTIONLINK']._serialized_start=2812 + _globals['_COLLECTIONLINK']._serialized_end=2912 + _globals['_APPROVALSTATUSNODE']._serialized_start=2915 + _globals['_APPROVALSTATUSNODE']._serialized_end=3072 + _globals['_APPROVALNODE']._serialized_start=3075 + _globals['_APPROVALNODE']._serialized_end=3254 + _globals['_FULLSYNCTOKEN']._serialized_start=3256 + _globals['_FULLSYNCTOKEN']._serialized_end=3323 + _globals['_INCSYNCTOKEN']._serialized_start=3325 + _globals['_INCSYNCTOKEN']._serialized_end=3361 + _globals['_PEDMSYNCTOKEN']._serialized_start=3363 + _globals['_PEDMSYNCTOKEN']._serialized_end=3467 + _globals['_GETPEDMDATAREQUEST']._serialized_start=3469 + _globals['_GETPEDMDATAREQUEST']._serialized_end=3516 + _globals['_GETPEDMDATARESPONSE']._serialized_start=3519 + _globals['_GETPEDMDATARESPONSE']._serialized_end=4076 + _globals['_POLICYAGENTREQUEST']._serialized_start=4078 + _globals['_POLICYAGENTREQUEST']._serialized_end=4138 + _globals['_POLICYAGENTRESPONSE']._serialized_start=4140 + _globals['_POLICYAGENTRESPONSE']._serialized_end=4199 + _globals['_AUDITCOLLECTIONREQUEST']._serialized_start=4201 + _globals['_AUDITCOLLECTIONREQUEST']._serialized_end=4294 + _globals['_AUDITCOLLECTIONVALUE']._serialized_start=4296 + _globals['_AUDITCOLLECTIONVALUE']._serialized_end=4400 + _globals['_AUDITCOLLECTIONRESPONSE']._serialized_start=4402 + _globals['_AUDITCOLLECTIONRESPONSE']._serialized_end=4515 + _globals['_GETCOLLECTIONLINKREQUEST']._serialized_start=4517 + _globals['_GETCOLLECTIONLINKREQUEST']._serialized_end=4589 + _globals['_GETCOLLECTIONLINKRESPONSE']._serialized_start=4591 + _globals['_GETCOLLECTIONLINKRESPONSE']._serialized_end=4672 + _globals['_GETAGENTLASTSEENREQUEST']._serialized_start=4674 + _globals['_GETAGENTLASTSEENREQUEST']._serialized_end=4737 + _globals['_AGENTLASTSEEN']._serialized_start=4739 + _globals['_AGENTLASTSEEN']._serialized_end=4790 + _globals['_GETAGENTLASTSEENRESPONSE']._serialized_start=4792 + _globals['_GETAGENTLASTSEENRESPONSE']._serialized_end=4857 + _globals['_GETACTIVEAGENTCOUNTREQUEST']._serialized_start=4859 + _globals['_GETACTIVEAGENTCOUNTREQUEST']._serialized_end=4909 + _globals['_ACTIVEAGENTCOUNT']._serialized_start=4911 + _globals['_ACTIVEAGENTCOUNT']._serialized_end=4973 + _globals['_ACTIVEAGENTFAILURE']._serialized_start=4975 + _globals['_ACTIVEAGENTFAILURE']._serialized_end=5034 + _globals['_GETACTIVEAGENTCOUNTRESPONSE']._serialized_start=5036 + _globals['_GETACTIVEAGENTCOUNTRESPONSE']._serialized_end=5156 + _globals['_GETAGENTDAILYCOUNTREQUEST']._serialized_start=5159 + _globals['_GETAGENTDAILYCOUNTREQUEST']._serialized_end=5294 + _globals['_MONTHYEAR']._serialized_start=5296 + _globals['_MONTHYEAR']._serialized_end=5336 + _globals['_DATERANGE']._serialized_start=5338 + _globals['_DATERANGE']._serialized_end=5377 + _globals['_AGENTDAILYCOUNT']._serialized_start=5379 + _globals['_AGENTDAILYCOUNT']._serialized_end=5430 + _globals['_AGENTCOUNTFORENTERPRISE']._serialized_start=5432 + _globals['_AGENTCOUNTFORENTERPRISE']._serialized_end=5518 + _globals['_GETAGENTDAILYCOUNTRESPONSE']._serialized_start=5520 + 
_globals['_GETAGENTDAILYCOUNTRESPONSE']._serialized_end=5605 # @@protoc_insertion_point(module_scope) diff --git a/keepersdk-package/src/keepersdk/proto/pedm_pb2.pyi b/keepersdk-package/src/keepersdk/proto/pedm_pb2.pyi index f38615bf..bdbb0427 100644 --- a/keepersdk-package/src/keepersdk/proto/pedm_pb2.pyi +++ b/keepersdk-package/src/keepersdk/proto/pedm_pb2.pyi @@ -205,6 +205,20 @@ class SetCollectionLinkRequest(_message.Message): removeCollection: _containers.RepeatedCompositeFieldContainer[CollectionLink] def __init__(self, addCollection: _Optional[_Iterable[_Union[CollectionLinkData, _Mapping]]] = ..., removeCollection: _Optional[_Iterable[_Union[CollectionLink, _Mapping]]] = ...) -> None: ... +class ApprovalExtendData(_message.Message): + __slots__ = ("approvalUid", "expireIn") + APPROVALUID_FIELD_NUMBER: _ClassVar[int] + EXPIREIN_FIELD_NUMBER: _ClassVar[int] + approvalUid: bytes + expireIn: int + def __init__(self, approvalUid: _Optional[bytes] = ..., expireIn: _Optional[int] = ...) -> None: ... + +class ModifyApprovalRequest(_message.Message): + __slots__ = ("extendApproval",) + EXTENDAPPROVAL_FIELD_NUMBER: _ClassVar[int] + extendApproval: _containers.RepeatedCompositeFieldContainer[ApprovalExtendData] + def __init__(self, extendApproval: _Optional[_Iterable[_Union[ApprovalExtendData, _Mapping]]] = ...) -> None: ... + class ApprovalActionRequest(_message.Message): __slots__ = ("approve", "deny", "remove") APPROVE_FIELD_NUMBER: _ClassVar[int] @@ -453,6 +467,28 @@ class GetCollectionLinkResponse(_message.Message): collectionLinkData: _containers.RepeatedCompositeFieldContainer[CollectionLinkData] def __init__(self, collectionLinkData: _Optional[_Iterable[_Union[CollectionLinkData, _Mapping]]] = ...) -> None: ... +class GetAgentLastSeenRequest(_message.Message): + __slots__ = ("activeOnly", "agentUid") + ACTIVEONLY_FIELD_NUMBER: _ClassVar[int] + AGENTUID_FIELD_NUMBER: _ClassVar[int] + activeOnly: bool + agentUid: _containers.RepeatedScalarFieldContainer[bytes] + def __init__(self, activeOnly: bool = ..., agentUid: _Optional[_Iterable[bytes]] = ...) -> None: ... + +class AgentLastSeen(_message.Message): + __slots__ = ("agentUid", "lastSeen") + AGENTUID_FIELD_NUMBER: _ClassVar[int] + LASTSEEN_FIELD_NUMBER: _ClassVar[int] + agentUid: bytes + lastSeen: int + def __init__(self, agentUid: _Optional[bytes] = ..., lastSeen: _Optional[int] = ...) -> None: ... + +class GetAgentLastSeenResponse(_message.Message): + __slots__ = ("lastSeen",) + LASTSEEN_FIELD_NUMBER: _ClassVar[int] + lastSeen: _containers.RepeatedCompositeFieldContainer[AgentLastSeen] + def __init__(self, lastSeen: _Optional[_Iterable[_Union[AgentLastSeen, _Mapping]]] = ...) -> None: ... + class GetActiveAgentCountRequest(_message.Message): __slots__ = ("enterpriseId",) ENTERPRISEID_FIELD_NUMBER: _ClassVar[int] diff --git a/keepersdk-package/src/keepersdk/vault/attachment.py b/keepersdk-package/src/keepersdk/vault/attachment.py index 68f82045..8dfb5ab7 100644 --- a/keepersdk-package/src/keepersdk/vault/attachment.py +++ b/keepersdk-package/src/keepersdk/vault/attachment.py @@ -14,6 +14,7 @@ from .vault_record import FileRecord, PasswordRecord, TypedRecord, AttachmentFile, AttachmentFileThumb from .. 
import utils, crypto from ..proto import record_pb2 +from ..authentication import endpoint class AttachmentDownloadRequest: @@ -212,7 +213,7 @@ def upload_attachments(vault: vault_online.VaultOnline, files = { uo['file_parameter']: (attachment_id, crypto_stream, 'application/octet-stream') } - response = requests.post(uo['url'], files=files, data=uo['parameters']) + response = requests.post(uo['url'], files=files, data=uo['parameters'], verify=endpoint.get_certificate_check()) if response.status_code == uo['success_status_code']: atta.id = attachment_id atta.name = task.name or '' @@ -237,7 +238,7 @@ def upload_attachments(vault: vault_online.VaultOnline, files = { tuo['file_parameter']: (tuo['file_id'], crypto_stream, 'application/octet-stream') } - response = requests.post(tuo['url'], files=files, data=tuo['parameters']) + response = requests.post(tuo['url'], files=files, data=tuo['parameters'], verify=endpoint.get_certificate_check()) if response.status_code == uo['success_status_code']: thumb = AttachmentFileThumb() thumb.id = tuo['file_id'] @@ -301,7 +302,7 @@ def upload_attachments(vault: vault_online.VaultOnline, files = { 'file': (file_ref, crypto_stream, 'application/octet-stream') } - response = requests.post(uo.url, files=files, data=json.loads(uo.parameters)) + response = requests.post(uo.url, files=files, data=json.loads(uo.parameters), verify=endpoint.get_certificate_check()) if response.status_code == uo.success_status_code: facade.file_ref.append(file_ref) if record.linked_keys is None: diff --git a/keepersdk-package/src/keepersdk/vault/batch_operations.py b/keepersdk-package/src/keepersdk/vault/batch_operations.py index e7356b81..2b8ce423 100644 --- a/keepersdk-package/src/keepersdk/vault/batch_operations.py +++ b/keepersdk-package/src/keepersdk/vault/batch_operations.py @@ -588,6 +588,7 @@ def get_shared_folder_key(f_uid: str) -> bytes: if folder_node: if folder_node.folder_type == 'user_folder': tra_rq.folder_type = record_pb2.RecordFolderType.user_folder + shared_folder_uid = None elif folder_node.folder_type == 'shared_folder': tra_rq.folder_type = record_pb2.RecordFolderType.shared_folder shared_folder_uid = folder_node.folder_uid diff --git a/keepersdk-package/src/keepersdk/vault/ksm_management.py b/keepersdk-package/src/keepersdk/vault/ksm_management.py index 31a6f883..50c59254 100644 --- a/keepersdk-package/src/keepersdk/vault/ksm_management.py +++ b/keepersdk-package/src/keepersdk/vault/ksm_management.py @@ -15,7 +15,7 @@ GetAppInfoResponse, RemoveAppClientsRequest, Device, AddAppClientRequest, AppShareAdd, AddAppSharesRequest, RemoveAppSharesRequest ) -from ..proto.enterprise_pb2 import GENERAL +from ..proto.enterprise_pb2 import GENERAL, AppClientType from ..proto.record_pb2 import ApplicationAddRequest URL_GET_SUMMARY_API = 'vault/get_applications_summary' @@ -503,7 +503,8 @@ def add_client_to_ksm_app( first_access_expire_duration_ms: int, access_expire_in_ms: Optional[int], master_key: bytes, - server: str) -> dict: + server: str, + client_type: AppClientType) -> dict: """Generate a single client device and return token info and output string.""" # Generate secret and client ID @@ -523,7 +524,8 @@ def add_client_to_ksm_app( client_id=client_id, client_name=client_name, count=count, - index=index + index=index, + client_type=client_type ) # Generate token with server prefix @@ -539,7 +541,7 @@ def add_client_to_ksm_app( first_access_expire_duration_ms=first_access_expire_duration_ms, access_expire_in_ms=access_expire_in_ms ) - + return { 'token_info': { 
'oneTimeToken': token_with_prefix, @@ -568,7 +570,8 @@ def _create_client_request( client_id: bytes, client_name: str, count: int, - index: int) -> Device: + index: int, + client_type: AppClientType) -> Device: """Create and send client request to server.""" request = AddAppClientRequest() @@ -576,7 +579,7 @@ def _create_client_request( request.encryptedAppKey = encrypted_master_key request.lockIp = not unlock_ip request.firstAccessExpireOn = first_access_expire_duration_ms - request.appClientType = GENERAL + request.appClientType = client_type request.clientId = client_id if access_expire_in_ms: diff --git a/keepersdk-package/src/keepersdk/vault/share_report.py b/keepersdk-package/src/keepersdk/vault/share_report.py new file mode 100644 index 00000000..66067013 --- /dev/null +++ b/keepersdk-package/src/keepersdk/vault/share_report.py @@ -0,0 +1,685 @@ +"""Share report functionality for Keeper SDK. + +This module provides functionality to generate comprehensive share reports +for records and shared folders in a Keeper vault. + +Usage: + from keepersdk.vault import share_report + + config = share_report.ShareReportConfig( + show_ownership=True, + verbose=True + ) + generator = share_report.ShareReportGenerator(vault, enterprise, auth, config) + entries = generator.generate_records_report() +""" + +import dataclasses +import datetime +from typing import Optional, List, Dict, Any, Iterable, Set, NamedTuple + +from . import vault_online, vault_types, vault_utils +from . import share_management_utils +from ..authentication import keeper_auth +from ..enterprise import enterprise_data as enterprise_data_types + + +@dataclasses.dataclass +class ShareReportEntry: + """Represents a single entry in the share report.""" + record_uid: str + record_title: str + record_owner: str = '' + shared_with: str = '' + shared_with_count: int = 0 + folder_paths: List[str] = dataclasses.field(default_factory=list) + share_date: Optional[str] = None + expiration: Optional[datetime.datetime] = None + + +@dataclasses.dataclass +class SharedFolderReportEntry: + """Represents a shared folder entry in the report.""" + folder_uid: str + folder_name: str + shared_to: str = '' + permissions: str = '' + folder_path: str = '' + + +@dataclasses.dataclass +class ShareSummaryEntry: + """Represents a summary entry showing shares by target.""" + shared_to: str + record_count: Optional[int] = None + shared_folder_count: Optional[int] = None + + +@dataclasses.dataclass +class ShareReportConfig: + """Configuration for share report generation. 
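+    Every field is optional: the filter lists default to None (no filtering) and the boolean flags default to False.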
+ + Attributes: + record_filter: List of record UIDs or names to filter by + user_filter: List of user emails or team names to filter by + container_filter: List of container (folder) UIDs to filter by + show_ownership: Include record ownership information + show_share_date: Include share date information (requires enterprise admin) + folders_only: Generate report for shared folders only (excludes records) + verbose: Include detailed permission information + show_team_users: Expand team memberships in the report + """ + record_filter: Optional[List[str]] = None + user_filter: Optional[List[str]] = None + container_filter: Optional[List[str]] = None + show_ownership: bool = False + show_share_date: bool = False + folders_only: bool = False + verbose: bool = False + show_team_users: bool = False + + +@dataclasses.dataclass +class UserPermissionInfo: + """Information about a user's permission on a record.""" + username: str + is_owner: bool = False + is_share_admin: bool = False + can_share: bool = False + can_edit: bool = False + expiration: int = 0 + + +@dataclasses.dataclass +class RecordShareInfo: + """Share information for a record.""" + record_uid: str + record_title: str + folder_paths: List[str] + user_permissions: List[UserPermissionInfo] + shared_folder_uids: List[str] + + +class SharedFolderMaps(NamedTuple): + """Maps for shared folder users and records.""" + user_map: Dict[str, Set[str]] + records_map: Dict[str, Set[str]] + + +class ShareReportGenerator: + """Generates share reports for records and shared folders. + + This class provides methods to generate detailed reports about record + and folder sharing within a Keeper vault. + + Example: + >>> config = ShareReportConfig(show_ownership=True, verbose=True) + >>> generator = ShareReportGenerator(vault, enterprise, auth, config) + >>> for entry in generator.generate_records_report(): + ... print(f"{entry.record_title}: shared with {entry.shared_with}") + """ + + def __init__( + self, + vault: vault_online.VaultOnline, + enterprise: Optional[enterprise_data_types.EnterpriseData] = None, + auth: Optional[keeper_auth.KeeperAuth] = None, + config: Optional[ShareReportConfig] = None + ) -> None: + """Initialize the ShareReportGenerator. + + Args: + vault: The VaultOnline instance providing access to vault data + enterprise: Optional EnterpriseData for team expansion and share date queries + auth: Optional KeeperAuth for API calls (defaults to vault.keeper_auth) + config: Configuration options for report generation + + Raises: + ValueError: If vault is None + """ + if vault is None: + raise ValueError("vault parameter is required") + self._vault = vault + self._enterprise = enterprise + self._auth = auth or vault.keeper_auth + self._config = config or ShareReportConfig() + self._share_info_cache: Optional[Dict[str, RecordShareInfo]] = None + + @property + def config(self) -> ShareReportConfig: + """Get the current report configuration.""" + return self._config + + @property + def vault(self) -> vault_online.VaultOnline: + """Get the vault instance.""" + return self._vault + + @property + def current_username(self) -> str: + """Get the current user's username.""" + return self._auth.auth_context.username + + def generate_shared_folders_report(self) -> List[SharedFolderReportEntry]: + """Generate a report of shared folders and their permissions. 
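+        One entry is produced per user or team permission on each shared folder; team targets are prefixed with '(Team)', and when show_team_users is set and enterprise data is available, additional '(Team User)' rows are emitted for each member.
+        Example (illustrative sketch; assumes an initialized VaultOnline instance):
+            >>> generator = ShareReportGenerator(vault)
+            >>> for entry in generator.generate_shared_folders_report():
+            ...     print(entry.folder_name, entry.shared_to, entry.permissions)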
+ + Returns: + List of SharedFolderReportEntry objects containing folder share information + """ + entries: List[SharedFolderReportEntry] = [] + + for sf_info in self._vault.vault_data.shared_folders(): + sf = self._vault.vault_data.load_shared_folder(sf_info.shared_folder_uid) + if not sf: + continue + + folder_path = vault_utils.get_folder_path(self._vault.vault_data, sf.shared_folder_uid) + + for perm in sf.user_permissions: + permissions = self._format_folder_permissions(perm) + shared_to = perm.name or perm.user_uid + + if perm.user_type == vault_types.SharedFolderUserType.Team: + shared_to = f'(Team) {shared_to}' + + # Expand team members if requested + if self._config.show_team_users and self._enterprise: + team_users = self._get_team_members(perm.user_uid) + for member in team_users: + entries.append(SharedFolderReportEntry( + folder_uid=sf.shared_folder_uid, + folder_name=sf.name, + shared_to=f'(Team User) {member}', + permissions=permissions, + folder_path=folder_path + )) + + entries.append(SharedFolderReportEntry( + folder_uid=sf.shared_folder_uid, + folder_name=sf.name, + shared_to=shared_to, + permissions=permissions, + folder_path=folder_path + )) + + return entries + + def generate_records_report(self) -> List[ShareReportEntry]: + """Generate a report of shared records.""" + if self._config.record_filter: + record_uids = self._resolve_record_uids(self._config.record_filter) + else: + record_uids = {r.record_uid for r in self._vault.vault_data.records()} + + if not record_uids: + return [] + + share_info_map = self._fetch_share_info(list(record_uids)) or {} + entries: List[ShareReportEntry] = [] + processed_uids: Set[str] = set() + + user_filter_lower = {u.lower() for u in self._config.user_filter} if self._config.user_filter else None + + for uid, share_info in share_info_map.items(): + if not self._should_include_record(share_info): + continue + + if user_filter_lower and not self._record_matches_user_filter(share_info, user_filter_lower): + continue + + entries.append(self._build_share_entry(share_info)) + processed_uids.add(uid) + + self._add_shared_folder_records(entries, processed_uids, share_info_map, user_filter_lower) + + return entries + + def _should_include_record(self, share_info: RecordShareInfo) -> bool: + """Check if a record should be included in the report.""" + non_owner_perms = [p for p in share_info.user_permissions if not p.is_owner] + has_owner = any(p.is_owner for p in share_info.user_permissions) + + if self._config.record_filter: + return True + + if not non_owner_perms: + return False + + return has_owner + + def _add_shared_folder_records( + self, + entries: List[ShareReportEntry], + processed_uids: Set[str], + share_info_map: Dict[str, RecordShareInfo], + user_filter_lower: Optional[Set[str]] + ) -> None: + """Add records from shared folders that weren't returned by the share API.""" + should_include = ( + self._config.user_filter or + self._config.show_ownership or + not self._config.record_filter + ) + + if not should_include: + return + + sf_records = ( + self._get_shared_folder_records_for_user(user_filter_lower) + if user_filter_lower + else self._get_all_shared_folder_records() + ) + + for record_uid in sf_records: + if record_uid in processed_uids: + continue + + record_info = self._vault.vault_data.get_record(record_uid) + if not record_info: + continue + + folder_paths = self._get_folder_paths(record_uid) + owner = self._get_owner_from_share_info(share_info_map, record_uid) + + entries.append(ShareReportEntry( + record_uid=record_uid, + 
record_title=record_info.title, + record_owner=owner, + shared_with='', + shared_with_count=0, + folder_paths=folder_paths + )) + processed_uids.add(record_uid) + + def _get_folder_paths(self, record_uid: str) -> List[str]: + """Get folder paths for a record.""" + paths = [] + for folder in vault_utils.get_folders_for_record(self._vault.vault_data, record_uid): + path = vault_utils.get_folder_path(self._vault.vault_data, folder.folder_uid) + if path: + paths.append(path) + return paths + + def _get_owner_from_share_info(self, share_info_map: Dict[str, RecordShareInfo], record_uid: str) -> str: + """Extract owner username from share info if available.""" + if record_uid not in share_info_map: + return '' + for perm in share_info_map[record_uid].user_permissions: + if perm.is_owner: + return perm.username + return '' + + def _get_all_shared_folder_records(self) -> Set[str]: + """Get all records in all shared folders.""" + return self._get_shared_folder_records_for_user(None) + + def _get_shared_folder_records_for_user(self, user_filter: Optional[Set[str]]) -> Set[str]: + """Get records in shared folders, optionally filtered by user access.""" + result: Set[str] = set() + + for sf_info in self._vault.vault_data.shared_folders(): + if user_filter: + sf = self._vault.vault_data.load_shared_folder(sf_info.shared_folder_uid) + if not sf or not self._user_has_sf_access(sf, user_filter): + continue + + self._collect_folder_records(sf_info.shared_folder_uid, result) + + return result + + def _user_has_sf_access(self, sf: vault_types.SharedFolder, user_filter: Set[str]) -> bool: + """Check if any user in the filter has access to the shared folder.""" + for perm in sf.user_permissions: + target = (perm.name or perm.user_uid or '').lower() + if target in user_filter: + return True + return False + + def _collect_folder_records(self, folder_uid: str, result: Set[str]) -> None: + """Collect all records from a folder and its subfolders.""" + folder = self._vault.vault_data.get_folder(folder_uid) + if not folder: + return + + result.update(folder.records) + + def collect(f: vault_types.Folder) -> None: + result.update(f.records) + + vault_utils.traverse_folder_tree(self._vault.vault_data, folder, collect) + + def generate_summary_report(self) -> List[ShareSummaryEntry]: + """Generate a summary report showing share counts by target user.""" + record_shares: Dict[str, Set[str]] = {} + sf_shares: Dict[str, Set[str]] = {} + + sf_maps = self._build_shared_folder_maps() + self._aggregate_shared_folder_access(sf_maps.user_map, sf_maps.records_map, record_shares, sf_shares) + self._aggregate_direct_shares(record_shares) + self._remove_current_user(record_shares, sf_shares) + + return self._build_summary_entries(record_shares, sf_shares) + + def _build_shared_folder_maps(self) -> SharedFolderMaps: + """Build maps of shared folder users and records. 
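+        user_map maps each shared-folder UID to the usernames or team UIDs granted access; records_map maps the same UID to the record UIDs the folder contains.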
+ + Returns: + SharedFolderMaps containing user_map and records_map + """ + sf_user_map: Dict[str, Set[str]] = {} + sf_records_map: Dict[str, Set[str]] = {} + + for sf_info in self._vault.vault_data.shared_folders(): + sf = self._vault.vault_data.load_shared_folder(sf_info.shared_folder_uid) + if not sf: + continue + + users_in_sf: Set[str] = set() + for perm in sf.user_permissions: + target = perm.name or perm.user_uid + if target: + users_in_sf.add(target) + sf_user_map[sf_info.shared_folder_uid] = users_in_sf + + folder = self._vault.vault_data.get_folder(sf_info.shared_folder_uid) + if folder and folder.records: + sf_records_map[sf_info.shared_folder_uid] = set(folder.records) + + return SharedFolderMaps(user_map=sf_user_map, records_map=sf_records_map) + + def _aggregate_shared_folder_access( + self, + sf_user_map: Dict[str, Set[str]], + sf_records_map: Dict[str, Set[str]], + record_shares: Dict[str, Set[str]], + sf_shares: Dict[str, Set[str]] + ) -> None: + """Aggregate record and folder counts from shared folder access.""" + for sf_uid, users in sf_user_map.items(): + records_in_sf = sf_records_map.get(sf_uid, set()) + for target in users: + if target == self.current_username: + continue + sf_shares.setdefault(target, set()).add(sf_uid) + for record_uid in records_in_sf: + record_shares.setdefault(target, set()).add(record_uid) + + def _aggregate_direct_shares(self, record_shares: Dict[str, Set[str]]) -> None: + """Aggregate record counts from direct share permissions.""" + all_record_uids = [r.record_uid for r in self._vault.vault_data.records()] + if not all_record_uids: + return + + share_info_map = self._fetch_share_info(all_record_uids) + for uid, share_info in share_info_map.items(): + for perm in share_info.user_permissions: + if perm.username != self.current_username: + record_shares.setdefault(perm.username, set()).add(uid) + + def _remove_current_user( + self, + record_shares: Dict[str, Set[str]], + sf_shares: Dict[str, Set[str]] + ) -> None: + """Remove current user from share counts.""" + record_shares.pop(self.current_username, None) + sf_shares.pop(self.current_username, None) + + def _build_summary_entries( + self, + record_shares: Dict[str, Set[str]], + sf_shares: Dict[str, Set[str]] + ) -> List[ShareSummaryEntry]: + """Build sorted list of summary entries.""" + all_targets = set(record_shares.keys()) | set(sf_shares.keys()) + return [ + ShareSummaryEntry( + shared_to=target, + record_count=len(record_shares.get(target, set())) or None, + shared_folder_count=len(sf_shares.get(target, set())) or None + ) + for target in sorted(all_targets) + ] + + def generate_report_rows(self) -> Iterable[List[Any]]: + """Generate report rows suitable for tabular output. + + Yields: + Lists of values representing report rows + """ + if self._config.folders_only: + for entry in self.generate_shared_folders_report(): + yield [entry.folder_uid, entry.folder_name, entry.shared_to, + entry.permissions, entry.folder_path] + elif self._config.show_ownership: + for entry in self.generate_records_report(): + shared_info = entry.shared_with if self._config.verbose else entry.shared_with_count + yield [entry.record_owner, entry.record_uid, entry.record_title, + shared_info, '\n'.join(entry.folder_paths)] + else: + for entry in self.generate_summary_report(): + yield [entry.shared_to, entry.record_count, entry.shared_folder_count] + + @staticmethod + def get_headers(folders_only: bool = False, ownership: bool = False) -> List[str]: + """Get report headers based on configuration. 
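+        The returned column names correspond, in order, to the row shapes yielded by generate_report_rows.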
+ + Args: + folders_only: True if generating shared folders report + ownership: True if generating ownership report + + Returns: + List of header column names + """ + if folders_only: + return ['folder_uid', 'folder_name', 'shared_to', 'permissions', 'folder_path'] + if ownership: + return ['record_owner', 'record_uid', 'record_title', 'shared_with', 'folder_path'] + return ['shared_to', 'records', 'shared_folders'] + + def _resolve_record_uids(self, record_refs: List[str]) -> Set[str]: + """Resolve record names or UIDs to actual UIDs.""" + result: Set[str] = set() + vault_data_instance = self._vault.vault_data + + for ref in record_refs: + record = vault_data_instance.get_record(ref) + if record: + result.add(ref) + continue + + for record_info in vault_data_instance.records(): + if record_info.title.lower() == ref.lower(): + result.add(record_info.record_uid) + break + + return result + + def _fetch_share_info(self, record_uids: List[str]) -> Dict[str, RecordShareInfo]: + """Fetch share information for records using the API.""" + if not record_uids: + return {} + + result: Dict[str, RecordShareInfo] = {} + + try: + shares_data = share_management_utils.get_record_shares( + self._vault, record_uids, is_share_admin=False + ) + + if not shares_data: + return result + + for share_record in shares_data: + record_uid = share_record.get('record_uid') + if not record_uid: + continue + + record_info = self._vault.vault_data.get_record(record_uid) + shares = share_record.get('shares', {}) + + result[record_uid] = RecordShareInfo( + record_uid=record_uid, + record_title=record_info.title if record_info else record_uid, + folder_paths=self._get_folder_paths(record_uid), + user_permissions=self._parse_user_permissions(shares), + shared_folder_uids=[ + sp.get('shared_folder_uid') + for sp in shares.get('shared_folder_permissions', []) + if sp.get('shared_folder_uid') + ] + ) + except Exception: + pass + + return result + + def _parse_user_permissions(self, shares: Dict) -> List[UserPermissionInfo]: + """Parse user permissions from share data.""" + permissions = [] + for up in shares.get('user_permissions', []): + exp = up.get('expiration', 0) + if isinstance(exp, str): + try: + exp = int(exp) + except ValueError: + exp = 0 + permissions.append(UserPermissionInfo( + username=up.get('username', ''), + is_owner=up.get('owner', False), + is_share_admin=up.get('share_admin', False), + can_share=up.get('shareable', False), + can_edit=up.get('editable', False), + expiration=exp + )) + return permissions + + def _build_share_entry(self, share_info: RecordShareInfo) -> ShareReportEntry: + """Build a ShareReportEntry from RecordShareInfo.""" + owner = self._get_owner_from_share_info({share_info.record_uid: share_info}, share_info.record_uid) + non_owner_shares = [p for p in share_info.user_permissions if not p.is_owner] + + shared_with = '' + if self._config.verbose: + shared_with = self._format_verbose_permissions(share_info) + + return ShareReportEntry( + record_uid=share_info.record_uid, + record_title=share_info.record_title, + record_owner=owner, + shared_with=shared_with, + shared_with_count=len(non_owner_shares), + folder_paths=share_info.folder_paths + ) + + def _format_verbose_permissions(self, share_info: RecordShareInfo) -> str: + """Format user permissions as newline-separated usernames.""" + lines: List[str] = [] + for perm in share_info.user_permissions: + lines.append(perm.username) + if perm.expiration > 0: + dt = datetime.datetime.fromtimestamp(perm.expiration // 1000) + 
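# the expiration value looks like epoch milliseconds (hence the // 1000 above); record the expiry as an indented note under the username +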
lines.append(f'\t(expires on {dt})') + return '\n'.join(lines) + + def _format_folder_permissions(self, perm: vault_types.SharedFolderPermission) -> str: + """Format shared folder permissions as human-readable text.""" + if perm.manage_users and perm.manage_records: + return "Can Manage Users & Records" + if perm.manage_records: + return "Can Manage Records" + if perm.manage_users: + return "Can Manage Users" + return "No User Permissions" + + def _record_matches_user_filter(self, share_info: RecordShareInfo, user_filter: Set[str]) -> bool: + """Check if user has access via direct shares or shared folder membership.""" + for perm in share_info.user_permissions: + if perm.username.lower() in user_filter: + return True + + for folder in vault_utils.get_folders_for_record(self._vault.vault_data, share_info.record_uid): + sf_uid = self._get_shared_folder_uid(folder) + if sf_uid: + sf = self._vault.vault_data.load_shared_folder(sf_uid) + if sf and self._user_has_sf_access(sf, user_filter): + return True + + return False + + def _get_shared_folder_uid(self, folder: vault_types.Folder) -> Optional[str]: + """Get the shared folder UID for a folder.""" + if folder.folder_type == 'shared_folder': + return folder.folder_uid + if folder.folder_type == 'shared_folder_folder' and folder.folder_scope_uid: + return folder.folder_scope_uid + return None + + def _get_team_members(self, team_uid: str) -> List[str]: + """Get team member usernames.""" + if not self._enterprise: + return [] + + members: List[str] = [] + try: + for team_user in self._enterprise.team_users.get_all_links(): + if team_user.team_uid == team_uid: + user = self._enterprise.users.get_entity(team_user.enterprise_user_id) + if user: + members.append(user.username) + except Exception: + pass + + return members + + +def generate_share_report( + vault: vault_online.VaultOnline, + enterprise: Optional[enterprise_data_types.EnterpriseData] = None, + record_filter: Optional[List[str]] = None, + user_filter: Optional[List[str]] = None, + verbose: bool = False +) -> List[ShareReportEntry]: + """Generate a share report for records. + + Args: + vault: The VaultOnline instance (required) + enterprise: Optional EnterpriseData for team expansion + record_filter: Optional list of record UIDs or names to filter by + user_filter: Optional list of user emails to filter by + verbose: Include detailed permission information + + Returns: + List of ShareReportEntry objects + + Raises: + ValueError: If vault is None + """ + config = ShareReportConfig( + record_filter=record_filter, + user_filter=user_filter, + verbose=verbose + ) + return ShareReportGenerator(vault, enterprise, config=config).generate_records_report() + + +def generate_shared_folders_report( + vault: vault_online.VaultOnline, + enterprise: Optional[enterprise_data_types.EnterpriseData] = None, + show_team_users: bool = False +) -> List[SharedFolderReportEntry]: + """Generate a report of shared folders and their permissions. 
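+ + Example (a minimal sketch; assumes vault is an authenticated VaultOnline instance): + >>> for entry in generate_shared_folders_report(vault, show_team_users=True): + ... print(entry.folder_name, entry.shared_to, entry.permissions)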
+ + Args: + vault: The VaultOnline instance (required) + enterprise: Optional EnterpriseData for team expansion + show_team_users: Expand team memberships in the report + + Returns: + List of SharedFolderReportEntry objects + + Raises: + ValueError: If vault is None + """ + config = ShareReportConfig(folders_only=True, show_team_users=show_team_users) + return ShareReportGenerator(vault, enterprise, config=config).generate_shared_folders_report() diff --git a/keepersdk-package/src/keepersdk/vault/shared_records_report.py b/keepersdk-package/src/keepersdk/vault/shared_records_report.py new file mode 100644 index 00000000..965f722a --- /dev/null +++ b/keepersdk-package/src/keepersdk/vault/shared_records_report.py @@ -0,0 +1,455 @@ +"""Shared records report functionality for Keeper SDK. + +This module provides functionality to generate comprehensive reports of shared records +for a logged-in user in a Keeper vault. + +Usage: + from keepersdk.vault import shared_records_report + + config = shared_records_report.SharedRecordsReportConfig( + show_team_users=True, + all_records=True + ) + generator = shared_records_report.SharedRecordsReportGenerator(vault, enterprise, auth, config) + entries = generator.generate_report() +""" + +import dataclasses +from typing import Optional, List, Dict, Any, Iterable, Set + +from . import vault_online, vault_types, vault_utils, vault_record +from . import share_management_utils +from ..authentication import keeper_auth +from ..enterprise import enterprise_data as enterprise_data_types + + +# Share types +SHARE_TYPE_DIRECT = 'Direct Share' +SHARE_TYPE_SHARED_FOLDER = 'Share Folder' +SHARE_TYPE_TEAM_FOLDER = 'Share Team Folder' + +# Record versions to include +OWNED_RECORD_VERSIONS = (2, 3) +ALL_RECORD_VERSIONS = (0, 1, 2, 3, 5, 6) + + +@dataclasses.dataclass +class SharedRecordReportEntry: + """Represents a single entry in the shared records report.""" + record_uid: str + title: str + owner: str = '' + share_type: str = '' + shared_to: str = '' + permissions: str = '' + folder_path: str = '' + + +@dataclasses.dataclass +class SharedRecordsReportConfig: + """Configuration for shared records report generation. + + Attributes: + folder_filter: List of folder UIDs or paths to filter by + show_team_users: Expand team memberships to show individual team members + all_records: Include all records in vault (not just owned records) + """ + folder_filter: Optional[List[str]] = None + show_team_users: bool = False + all_records: bool = False + + +class SharedRecordsReportGenerator: + """Generates shared records reports for a logged-in user. + + This class provides methods to generate detailed reports about shared records + within a Keeper vault, showing who has access to records and through what mechanism. + + Example: + >>> config = SharedRecordsReportConfig(show_team_users=True) + >>> generator = SharedRecordsReportGenerator(vault, enterprise, auth, config) + >>> for entry in generator.generate_report(): + ... print(f"{entry.title}: shared with {entry.shared_to} via {entry.share_type}") + """ + + def __init__( + self, + vault: vault_online.VaultOnline, + enterprise: Optional[enterprise_data_types.EnterpriseData] = None, + auth: Optional[keeper_auth.KeeperAuth] = None, + config: Optional[SharedRecordsReportConfig] = None + ) -> None: + """Initialize the SharedRecordsReportGenerator. 
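+ + No API calls are made here; share information is fetched when generate_report() runs.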
+ + Args: + vault: The VaultOnline instance providing access to vault data + enterprise: Optional EnterpriseData for team expansion + auth: Optional KeeperAuth for API calls (defaults to vault.keeper_auth) + config: Configuration options for report generation + """ + self._vault = vault + self._enterprise = enterprise + self._auth = auth or vault.keeper_auth + self._config = config or SharedRecordsReportConfig() + self._team_membership: Optional[Dict[str, List[str]]] = None + self._shares_cache: Dict[str, Dict[str, Any]] = {} + + @property + def config(self) -> SharedRecordsReportConfig: + """Get the current report configuration.""" + return self._config + + @property + def vault(self) -> vault_online.VaultOnline: + """Get the vault instance.""" + return self._vault + + @property + def current_username(self) -> str: + """Get the current user's username.""" + return self._auth.auth_context.username + + @staticmethod + def permissions_text( + *, + can_share: Optional[bool] = None, + can_edit: Optional[bool] = None, + can_view: bool = True + ) -> str: + """Generate human-readable permissions text. + + Args: + can_share: Whether the user can share the record + can_edit: Whether the user can edit the record + can_view: Whether the user can view the record (default True) + + Returns: + Human-readable permission string + """ + if not can_edit and not can_share: + return 'Read Only' if can_view else 'Launch Only' + else: + privs = [can_share and 'Share', can_edit and 'Edit'] + return f'Can {" & ".join([p for p in privs if p])}' + + def generate_report(self) -> List[SharedRecordReportEntry]: + """Generate the shared records report. + + Returns: + List of SharedRecordReportEntry objects containing share information + """ + records = self._get_records_to_report() + if not records: + return [] + + # Fetch share information for all records + record_uids = list(records.keys()) + self._fetch_and_cache_shares(record_uids) + + # Build team membership cache if needed + if self._config.show_team_users: + self._build_team_membership_cache() + + entries: List[SharedRecordReportEntry] = [] + + for record_uid, record_info in records.items(): + record_entries = self._process_record_shares(record_uid, record_info) + entries.extend(record_entries) + + return entries + + def _fetch_and_cache_shares(self, record_uids: List[str]) -> None: + """Fetch and cache share information for records.""" + shares_data = share_management_utils.get_record_shares(self._vault, record_uids) + if shares_data: + for share_record in shares_data: + record_uid = share_record.get('record_uid') + if record_uid: + self._shares_cache[record_uid] = share_record + + def _get_records_to_report(self) -> Dict[str, Any]: + """Get records to include in the report based on configuration.""" + records: Dict[str, Any] = {} + versions = ALL_RECORD_VERSIONS if self._config.all_records else OWNED_RECORD_VERSIONS + filter_folders: Optional[Set[str]] = None + + if self._config.folder_filter: + filter_folders = set() + for folder_name in self._config.folder_filter: + folder_uids = share_management_utils.get_folder_uids(self._vault, folder_name) + if folder_uids: + for uid in folder_uids: + self._traverse_folder_for_records(uid, filter_folders, records, versions) + else: + # Get all shared records matching criteria + for record_info in self._vault.vault_data.records(): + if not (record_info.flags & vault_record.RecordFlags.IsShared): + continue + if record_info.version not in versions: + continue + if not self._config.all_records: + # Check if user owns the 
record + if not (record_info.flags & vault_record.RecordFlags.IsOwner): + continue + records[record_info.record_uid] = record_info + + return records + + def _traverse_folder_for_records( + self, + folder_uid: str, + filter_folders: Set[str], + records: Dict[str, Any], + versions: tuple + ) -> None: + """Traverse a folder tree and collect records.""" + folder = self._vault.vault_data.get_folder(folder_uid) + if not folder: + return + + def on_folder(f: vault_types.Folder) -> None: + filter_folders.add(f.folder_uid) + for record_uid in f.records: + if record_uid in records: + continue + record_info = self._vault.vault_data.get_record(record_uid) + if not record_info: + continue + if not (record_info.flags & vault_record.RecordFlags.IsShared): + continue + if record_info.version not in versions: + continue + if not self._config.all_records: + if not (record_info.flags & vault_record.RecordFlags.IsOwner): + continue + records[record_uid] = record_info + + vault_utils.traverse_folder_tree(self._vault.vault_data, folder, on_folder) + + def _build_team_membership_cache(self) -> None: + """Build cache of team memberships for team expansion.""" + self._team_membership = {} + + if self._enterprise is None: + return + + # Build user lookup + user_lookup: Dict[int, str] = {} + for user in self._enterprise.users.get_all_entities(): + if user.status == 'active': + user_lookup[user.enterprise_user_id] = user.username + + # Build team membership + for team_user in self._enterprise.team_users.get_all_links(): + team_uid = team_user.team_uid + enterprise_user_id = team_user.enterprise_user_id + if enterprise_user_id in user_lookup: + if team_uid not in self._team_membership: + self._team_membership[team_uid] = [] + self._team_membership[team_uid].append(user_lookup[enterprise_user_id]) + + def _process_record_shares(self, record_uid: str, record_info: Any) -> List[SharedRecordReportEntry]: + """Process shares for a single record.""" + entries: List[SharedRecordReportEntry] = [] + + # Get share info from cache + shares_data = self._get_record_shares_data(record_uid) + if not shares_data: + return entries + + shares = shares_data.get('shares', {}) + owner = self._get_owner_from_shares(shares) + folder_path = self._get_folder_path(record_uid) + + # Process user permissions (direct shares) + for up in shares.get('user_permissions', []): + username = up.get('username') + if not username: + continue + if not self._config.all_records and username == self.current_username: + continue + + permission = self.permissions_text( + can_share=up.get('shareable'), + can_edit=up.get('editable') + ) + + entries.append(SharedRecordReportEntry( + record_uid=record_uid, + title=record_info.title, + owner=owner, + share_type=SHARE_TYPE_DIRECT, + shared_to=username, + permissions=permission, + folder_path=folder_path + )) + + # Process shared folder permissions + for sfp in shares.get('shared_folder_permissions', []): + shared_folder_uid = sfp.get('shared_folder_uid') + can_share = sfp.get('reshareable') + can_edit = sfp.get('editable') + base_permission = self.permissions_text(can_share=can_share, can_edit=can_edit) + + sf = self._vault.vault_data.load_shared_folder(shared_folder_uid) + if sf: + sf_folder_path = vault_utils.get_folder_path(self._vault.vault_data, shared_folder_uid) + + # Process users in shared folder + for perm in sf.user_permissions: + if perm.user_type == vault_types.SharedFolderUserType.User: + username = perm.name or perm.user_uid + if not self._config.all_records and username == self.current_username: + 
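# skip the requesting user's own entry unless the report covers all records +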
continue + + entries.append(SharedRecordReportEntry( + record_uid=record_uid, + title=record_info.title, + owner=owner, + share_type=SHARE_TYPE_SHARED_FOLDER, + shared_to=username, + permissions=base_permission, + folder_path=sf_folder_path + )) + + elif perm.user_type == vault_types.SharedFolderUserType.Team: + team_uid = perm.user_uid + team_name = perm.name or team_uid + + # Calculate team-specific permissions + team_permission = self._get_team_permission(team_uid, can_share, can_edit) + + # Expand team members if requested + if self._team_membership and team_uid in self._team_membership: + for member in self._team_membership[team_uid]: + entries.append(SharedRecordReportEntry( + record_uid=record_uid, + title=record_info.title, + owner=owner, + share_type=SHARE_TYPE_TEAM_FOLDER, + shared_to=f'({team_name}) {member}', + permissions=team_permission, + folder_path=sf_folder_path + )) + else: + entries.append(SharedRecordReportEntry( + record_uid=record_uid, + title=record_info.title, + owner=owner, + share_type=SHARE_TYPE_TEAM_FOLDER, + shared_to=team_name, + permissions=team_permission, + folder_path=sf_folder_path + )) + else: + # Shared folder not accessible + entries.append(SharedRecordReportEntry( + record_uid=record_uid, + title=record_info.title, + owner=owner, + share_type=SHARE_TYPE_SHARED_FOLDER, + shared_to='***', + permissions=base_permission, + folder_path=shared_folder_uid + )) + + return entries + + def _get_record_shares_data(self, record_uid: str) -> Optional[Dict[str, Any]]: + """Get cached share data for a record.""" + return self._shares_cache.get(record_uid) + + def _get_owner_from_shares(self, shares: Dict) -> str: + """Extract owner username from share data.""" + for up in shares.get('user_permissions', []): + if up.get('owner') is True: + return up.get('username', '') + return '' + + def _get_folder_path(self, record_uid: str) -> str: + """Get folder path(s) for a record.""" + paths: List[str] = [] + for folder in vault_utils.get_folders_for_record(self._vault.vault_data, record_uid): + path = vault_utils.get_folder_path(self._vault.vault_data, folder.folder_uid) + if path: + paths.append(path) + return '\n'.join(paths) + + def _get_team_permission( + self, + team_uid: str, + can_share: Optional[bool], + can_edit: Optional[bool] + ) -> str: + """Calculate team-specific permissions considering team restrictions.""" + # Try to get team restrictions from vault cache + team = None + for t_info in self._vault.vault_data.teams(): + if t_info.team_uid == team_uid: + team = self._vault.vault_data.load_team(team_uid) + break + + if team: + return self.permissions_text( + can_share=can_share and not team.restrict_share, + can_edit=can_edit and not team.restrict_edit, + can_view=not team.restrict_view + ) + + return self.permissions_text(can_share=can_share, can_edit=can_edit) + + def generate_report_rows(self) -> Iterable[List[Any]]: + """Generate report rows suitable for tabular output. + + Yields: + Lists of values representing report rows + """ + for entry in self.generate_report(): + if self._config.all_records: + yield [entry.owner, entry.record_uid, entry.title, entry.share_type, + entry.shared_to, entry.permissions, entry.folder_path] + else: + yield [entry.record_uid, entry.title, entry.share_type, + entry.shared_to, entry.permissions, entry.folder_path] + + @staticmethod + def get_headers(all_records: bool = False) -> List[str]: + """Get report headers based on configuration. 
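+ + The header list lines up column-for-column with the rows yielded by generate_report_rows() for the same all_records value.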
+ + Args: + all_records: True if reporting on all records (includes owner column) + + Returns: + List of header column names + """ + if all_records: + return ['owner', 'record_uid', 'title', 'share_type', 'shared_to', 'permissions', 'folder_path'] + return ['record_uid', 'title', 'share_type', 'shared_to', 'permissions', 'folder_path'] + + +def generate_shared_records_report( + vault: vault_online.VaultOnline, + enterprise: Optional[enterprise_data_types.EnterpriseData] = None, + folder_filter: Optional[List[str]] = None, + show_team_users: bool = False, + all_records: bool = False +) -> List[SharedRecordReportEntry]: + """Convenience function to generate a shared records report. + + Args: + vault: The VaultOnline instance + enterprise: Optional EnterpriseData for team expansion + folder_filter: Optional list of folder UIDs/paths to filter by + show_team_users: Expand team memberships + all_records: Include all records (not just owned) + + Returns: + List of SharedRecordReportEntry objects + """ + config = SharedRecordsReportConfig( + folder_filter=folder_filter, + show_team_users=show_team_users, + all_records=all_records + ) + return SharedRecordsReportGenerator(vault, enterprise, config=config).generate_report() + diff --git a/keepersdk-package/src/keepersdk/vault/shares_management.py b/keepersdk-package/src/keepersdk/vault/shares_management.py index 610604e6..d76108db 100644 --- a/keepersdk-package/src/keepersdk/vault/shares_management.py +++ b/keepersdk-package/src/keepersdk/vault/shares_management.py @@ -190,17 +190,23 @@ def _handle_user_invitations(vault, all_users, action, dry_run): return all_users @staticmethod - def _encrypt_record_key_for_user(vault, record_key, email, ro): + def _encrypt_record_key_for_user(vault: vault_online.VaultOnline, record_key: bytes, email: str, ro: record_pb2.SharedRecord): """Encrypt record key for a user using their public key.""" - keys = vault.keeper_auth._key_cache[email] - if vault.keeper_auth.auth_context.forbid_rsa and keys.ec: - ec_key = crypto.load_ec_public_key(keys.ec) - ro.recordKey = crypto.encrypt_ec(record_key, ec_key) - ro.useEccKey = True - elif not vault.keeper_auth.auth_context.forbid_rsa and keys.rsa: - rsa_key = crypto.load_rsa_public_key(keys.rsa) - ro.recordKey = crypto.encrypt_rsa(record_key, rsa_key) - ro.useEccKey = False + keys = vault.keeper_auth.get_user_keys(email) + if not keys: + vault.keeper_auth.load_user_public_keys([email]) + keys = vault.keeper_auth.get_user_keys(email) + if not keys: + raise ValueError(f'User {email} public key not found') + if keys: + if vault.keeper_auth.auth_context.forbid_rsa and keys.ec: + ec_key = crypto.load_ec_public_key(keys.ec) + ro.recordKey = crypto.encrypt_ec(record_key, ec_key) + ro.useEccKey = True + elif not vault.keeper_auth.auth_context.forbid_rsa and keys.rsa: + rsa_key = crypto.load_rsa_public_key(keys.rsa) + ro.recordKey = crypto.encrypt_rsa(record_key, rsa_key) + ro.useEccKey = False @staticmethod def _build_shared_record(vault, email, record_uid, record_path, action, @@ -238,9 +244,9 @@ def _build_shared_record(vault, email, record_uid, record_path, action, return ro @staticmethod - def _process_record_shares(vault, record_uids, all_users, action, can_edit, - can_share, share_expiration, record_cache, - not_owned_records, is_share_admin, enterprise): + def _process_record_shares(vault: vault_online.VaultOnline, record_uids: list[str], all_users: list[str], action: str, can_edit: bool, + can_share: bool, share_expiration: int, record_cache: dict[str, dict], + 
not_owned_records: dict[str, dict], is_share_admin: bool, enterprise: enterprise_data.EnterpriseData): """Process shares for all records and users, building the request.""" rq = record_pb2.RecordShareUpdateRequest() @@ -289,7 +295,7 @@ def _process_record_shares(vault, record_uids, all_users, action, can_edit, if action in {ShareAction.GRANT.value, ShareAction.OWNER.value}: record_uid_to_use = rec.get('record_uid', record_uid) if isinstance(rec, dict) else getattr(rec, 'record_uid', record_uid) record_key = vault.vault_data.get_record_key(record_uid=record_uid_to_use) - if record_key and email not in existing_shares and vault.keeper_auth._key_cache and email in vault.keeper_auth._key_cache: + if record_key and email not in existing_shares: RecordShares._encrypt_record_key_for_user(vault, record_key, email, ro) if email in existing_shares: