From 26a0bb4a5cc73e45e28be1f11f669d57c19a8460 Mon Sep 17 00:00:00 2001
From: bosd
Date: Wed, 9 Jul 2025 11:40:24 +0200
Subject: [PATCH 01/24] Python 3 support

---
 odoo_import_scaffold.py | 1009 +++++++++++++++++++--------------------
 1 file changed, 502 insertions(+), 507 deletions(-)

diff --git a/odoo_import_scaffold.py b/odoo_import_scaffold.py
index 04acca8..6a78d19 100755
--- a/odoo_import_scaffold.py
+++ b/odoo_import_scaffold.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-#-*- coding: utf-8 -*-
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
 
 import sys
 import argparse
@@ -9,7 +9,9 @@ import odoolib
 import io
 import socket
 
-from odoo_csv_tools.lib import conf_lib
+from odoo_data_flow.lib import conf_lib
+from pathlib import Path
+from typing import List, Dict, Any, Callable, Union
 
 module_version = '1.4.2'
 offline = False
@@ -20,120 +22,142 @@
 # FUNCTIONS FOR DIRECTORY STRUCTURE
 ##############################################################################
 
-def create_folder(path):
-    """
-    Create a folder only if it doesn't exist or if the flag "force" is set.
+def create_folder(path: Path) -> None:
+    """Create a folder only if it doesn't exist or if the flag "force" is set.
+
+    Args:
+        path: The path of the folder to create.
     """
-    if os.path.exists(path) and not force:
-        if verbose: sys.stdout.write('Folder %s already exists.\n' % path)
+    if path.exists() and not force:
+        if verbose:
+            sys.stdout.write(f'Folder {path} already exists.\n')
         return
-    if verbose: sys.stdout.write('Create folder %s\n' % path)
-    try:
-        os.makedirs(path)
-    except OSError as exception:
-        if exception.errno != errno.EEXIST:
-            raise
+    if verbose:
+        sys.stdout.write(f'Create folder {path}\n')
+    path.mkdir(parents=True, exist_ok=True)
+
 
-def check_file_exists(func):
-    """
-    Decorator avoiding to overwrite a file if it already exists
+def check_file_exists(func: Callable) -> Callable:
+    """Decorator avoiding overwriting a file if it already exists
     but still allowing it if the flag "force" is set.
+
+    Args:
+        func: The function to decorate.
+
+    Returns:
+        The decorated function.
     """
     def wrapper(*args, **kwargs):
-        file = args[0]
-        if os.path.isfile(file) and not force:
-            if verbose: sys.stdout.write('File %s already exists.\n' % file)
+        file_path = Path(args[0])
+        if file_path.is_file() and not force:
+            if verbose:
+                sys.stdout.write(f'File {file_path} already exists.\n')
             return
-        if verbose: sys.stdout.write('Create file %s\n' % file)
+        if verbose:
+            sys.stdout.write(f'Create file {file_path}\n')
         func(*args, **kwargs)
     return wrapper
 
-def is_remote_host(hostname):
-    """
-    Return True if 'hostname' is not the local host.
+def is_remote_host(hostname: str) -> bool:
+    """Return True if 'hostname' is not the local host.
+
+    Args:
+        hostname: The hostname to check.
+
+    Returns:
+        True if the hostname is remote, False otherwise.
     """
-    my_host_name = socket.gethostname() 
+    my_host_name = socket.gethostname()
    my_host_ip = socket.gethostbyname(my_host_name)
     if any(hostname in x for x in [my_host_name, my_host_ip, socket.getfqdn(), 'localhost', '127.0.0.1']):
         return False
     return True
-    
+
 @check_file_exists
-def create_connection_file_local(file):
-    """
-    Enforce encrypted connection on remote hosts.
+def create_connection_file_local(file: Path) -> None:
+    """Enforce encrypted connection on remote hosts.
     Leave unencrypted for local databases.
+
+    Args:
+        file: The path to the connection file.
""" is_remote = is_remote_host(host) protocol = 'jsonrpcs' if is_remote else 'jsonrpc' port = '443' if is_remote else '8069' - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("[Connection]\n") - f.write("hostname = %s\n" % host) - f.write("database = %s\n" % dbname) + f.write(f"hostname = {host}\n") + f.write(f"database = {dbname}\n") f.write("login = admin\n") f.write("password = admin\n") - f.write("protocol = %s\n" % protocol) - f.write("port = %s\n" % port) - f.write("uid = %s\n" % userid) + f.write(f"protocol = {protocol}\n") + f.write(f"port = {port}\n") + f.write(f"uid = {userid}\n") @check_file_exists -def create_connection_file_remote(file, hostname): - """ - Just a preset for encrypted connection. +def create_connection_file_remote(file: Path, hostname: str) -> None: + """Just a preset for encrypted connection. + + Args: + file: The path to the connection file. + hostname: The hostname of the remote server. """ - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("[Connection]\n") - f.write("hostname = %s\n" % hostname) + f.write(f"hostname = {hostname}\n") f.write("database = \n") f.write("login = \n") f.write("password = \n") f.write("protocol = jsonrpcs\n") f.write("port = 443\n") - f.write("uid = %s\n" % userid) + f.write(f"uid = {userid}\n") @check_file_exists -def create_cleanup_script(file): - """ - Create the shell script to empty the folder "data". +def create_cleanup_script(file: Path) -> None: + """Create the shell script to empty the folder "data". + + Args: + file: The path to the cleanup script. """ if platform.system() == 'Windows': - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("@echo off\n\n") - f.write("set DIR=%s\n" % data_dir_name) - f.write("del /F /S /Q %DIR%\*\n") + f.write(f"set DIR={data_dir_name}\n") + f.write(f"del /F /S /Q %DIR%\\*\n") else: - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("#!/usr/bin/env bash\n\n") - f.write("DIR=%s%s\n" % (data_dir_name, os.sep)) + f.write(f"DIR={data_dir_name}/\n") f.write("rm -rf --interactive=never $DIR\n") f.write("mkdir -p $DIR\n") - os.chmod(file, 0o755) + file.chmod(0o755) @check_file_exists -def create_transform_script(file): - """ - Create the shell script skeleton that launches all transform commands. +def create_transform_script(file: Path) -> None: + """Create the shell script skeleton that launches all transform commands. + + Args: + file: The path to the transform script. 
""" if platform.system() == 'Windows': - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("@echo off\n\n") - f.write("set LOGDIR=%s\n" % log_dir_name) - f.write("set DATADIR=%s\n\n" % data_dir_name) + f.write(f"set LOGDIR={log_dir_name}\n") + f.write(f"set DATADIR={data_dir_name}\n\n") f.write("call cleanup_data_dir.cmd\n\n") f.write("REM Add here all transform commands\n") f.write("REM python my_model.py > %LOGDIR%\\transform_$1_out.log 2> %LOGDIR%\\transform_$1_err.log\n") else: - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("#!/usr/bin/env bash\n\n") - f.write("LOGDIR=%s\n" % log_dir_name) - f.write("DATADIR=%s\n\n" % data_dir_name) + f.write(f"LOGDIR={log_dir_name}\n") + f.write(f"DATADIR={data_dir_name}\n\n") f.write("COLOR='\\033[1;32m'\n") f.write("NC='\\033[0m'\n\n") f.write("msg() {\n") @@ -145,7 +169,7 @@ def create_transform_script(file): f.write(" sleep 1\n") f.write(" done\n") f.write(" end=$(date +%s.%3N)\n") - f.write(" runtime=$(python -c \"print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)\")\n") + f.write(" runtime=$(python -c \"print(f'{int(float(end) - float(start))/60}:{int(float(end) - float(start))%60:02}')\")\n") f.write(" printf \"] $runtime \\n\"\n") f.write("}\n\n") f.write("load_script() {\n") @@ -158,24 +182,26 @@ def create_transform_script(file): f.write("# Add here all transform commands\n") f.write("# load_script python_script (without extension)\n") f.write("chmod +x *.sh\n") - os.chmod(file, 0o755) - + file.chmod(0o755) + @check_file_exists -def create_load_script(file): - """ - Create the shell script skeleton that launches all load commands. +def create_load_script(file: Path) -> None: + """Create the shell script skeleton that launches all load commands. + + Args: + file: The path to the load script. """ if platform.system() == 'Windows': - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("@echo off\n\n") - f.write("set LOGDIR=%s\n\n" % log_dir_name) + f.write(f"set LOGDIR={log_dir_name}\n\n") f.write("REM Add here all load commands\n") f.write("REM my_model.cmd > %LOGDIR%\\load_$1_out.log 2> %LOGDIR%\\load_$1_err.log\n") else: - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("#!/usr/bin/env bash\n\n") - f.write("LOGDIR=%s\n\n" % log_dir_name) + f.write(f"LOGDIR={log_dir_name}\n\n") f.write("COLOR='\\033[1;32m'\n") f.write("NC='\\033[0m'\n\n") f.write("user_interrupt() {\n") @@ -197,7 +223,7 @@ def create_load_script(file): f.write(" sleep 1\n") f.write(" done\n") f.write(" end=$(date +%s.%3N)\n") - f.write(" runtime=$(python -c \"print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)\")\n") + f.write(" runtime=$(python -c \"print(f'{int(float(end) - float(start))/60}:{int(float(end) - float(start))%60:02}')\")\n") f.write(" printf \"] $runtime \\n\"\n") f.write("}\n\n") f.write("load_script() {\n") @@ -209,19 +235,21 @@ def create_load_script(file): f.write("trap user_interrupt SIGTSTP\n\n") f.write("# Add here all load commands\n") f.write("# load_script shell_script (without extension)\n") - os.chmod(file, 0o755) + file.chmod(0o755) @check_file_exists -def create_file_prefix(file): - """ - Create the skeleton of prefix.py. +def create_file_prefix(file: Path) -> None: + """Create the skeleton of prefix.py. + + Args: + file: The path to the prefix.py file. 
""" - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This file defines xml_id prefixes and projectwise variables.\n\n") f.write("# Defines here a identifier used in the created XML_ID.\n") - f.write("project_name = '%s'\n" % project_name) + f.write(f"project_name = '{project_name}'\n") f.write("\n") f.write("DEFAULT_WORKER = 1\n") f.write("DEFAULT_BATCH_SIZE = 20\n") @@ -242,14 +270,15 @@ def create_file_prefix(file): f.write("# 'E': 'en_US',\n") f.write("}\n\n") f.write("# XML ID PREFIXES\n") - @check_file_exists -def create_file_mapping(file): - """ - Create the skeleton of mapping.py. +def create_file_mapping(file: Path) -> None: + """Create the skeleton of mapping.py. + + Args: + file: The path to the mapping.py file. """ - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This file defines mapping dictionaries.\n\n") f.write("# MAPPING DICTIONARIES\n") @@ -258,159 +287,124 @@ def create_file_mapping(file): @check_file_exists -def create_file_files(file): - """ - Create the skeleton of files.py. +def create_file_files(file: Path) -> None: + """Create the skeleton of files.py. + + Args: + file: The path to the files.py file. """ - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This file defines the names of all used files.\n\n") - f.write("import os\n") + f.write("from pathlib import Path\n") f.write("\n") f.write("# Folders\n") - f.write("conf_dir = '%s'\n" % conf_dir_name) - f.write("data_src_dir = '%s'\n" % orig_dir_name) - f.write("data_raw_dir = '%s%sbinary%s' % (data_src_dir,os.sep, os.sep)\n") - f.write("data_dest_dir = '%s'\n" % data_dir_name) + f.write(f"conf_dir = Path('{conf_dir_name}')\n") + f.write(f"data_src_dir = Path('{orig_dir_name}')\n") + f.write(f"data_raw_dir = data_src_dir / 'binary'\n") + f.write(f"data_dest_dir = Path('{data_dir_name}')\n") f.write("\n") f.write("# Configuration\n") - f.write("config_file = os.path.join(conf_dir,'connection.conf')\n") + f.write("config_file = conf_dir / 'connection.conf'\n") f.write("\n") f.write("# Declare here all data files\n") if not model: - f.write("# Client file: src_my_model = os.path.join(data_src_dir, 'my_model.csv')\n") - f.write("# Import file: dest_my_model = os.path.join(data_dest_dir, 'my.model.csv')\n") - + f.write("# Client file: src_my_model = data_src_dir / 'my_model.csv'\n") + f.write("# Import file: dest_my_model = data_dest_dir / 'my.model.csv'\n") + f.write("\n") @check_file_exists -def create_file_lib(file): - """ - Create the skeleton of funclib.py. +def create_file_lib(file: Path) -> None: + """Create the skeleton of funclib.py. + + Args: + file: The path to the funclib.py file. 
""" - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This file defines common functions.\n\n") - f.write("from odoo_csv_tools.lib import mapper\n") - f.write("from odoo_csv_tools.lib.transform import Processor\n") + f.write("from odoo_data_flow.lib import mapper\n") + f.write("from odoo_data_flow.lib.transform import Processor\n") f.write("from prefix import *\n") f.write("from mapping import *\n") f.write("from datetime import datetime\n") f.write("\n\n") f.write("nvl = lambda a, b: a or b\n") f.write("\n\n") - f.write("def keep_numbers(val):\n") - f.write(" return filter(lambda x: x.isdigit(), val)\n") + f.write("def keep_numbers(val: str) -> str:\n") + f.write(" return ''.join(filter(str.isdigit, val))\n") f.write("\n\n") - f.write("def keep_letters(val):\n") - f.write(" return filter(lambda x: x.isalpha(), val)\n") + f.write("def keep_letters(val: str) -> str:\n") + f.write(" return ''.join(filter(str.isalpha, val))\n") f.write("\n\n") - f.write("def remove_accents(val):\n") - f.write(" replacements = [\n") - f.write(" (u'\%s', 'a'),\n" % 'xe0') - f.write(" (u'\%s', 'a'),\n" % 'xe1') - f.write(" (u'\%s', 'a'),\n" % 'xe2') - f.write(" (u'\%s', 'a'),\n" % 'xe3') - f.write(" (u'\%s', 'a'),\n" % 'xe4') - f.write(" (u'\%s', 'a'),\n" % 'xe5') - f.write(" (u'\%s', 'A'),\n" % 'xc0') - f.write(" (u'\%s', 'A'),\n" % 'xc1') - f.write(" (u'\%s', 'A'),\n" % 'xc2') - f.write(" (u'\%s', 'A'),\n" % 'xc3') - f.write(" (u'\%s', 'A'),\n" % 'xc4') - f.write(" (u'\%s', 'A'),\n" % 'xc5') - f.write(" (u'\%s', 'e'),\n" % 'xe8') - f.write(" (u'\%s', 'e'),\n" % 'xe9') - f.write(" (u'\%s', 'e'),\n" % 'xea') - f.write(" (u'\%s', 'e'),\n" % 'xeb') - f.write(" (u'\%s', 'E'),\n" % 'xc8') - f.write(" (u'\%s', 'E'),\n" % 'xc9') - f.write(" (u'\%s', 'E'),\n" % 'xca') - f.write(" (u'\%s', 'E'),\n" % 'xcb') - f.write(" (u'\%s', 'i'),\n" % 'xec') - f.write(" (u'\%s', 'i'),\n" % 'xed') - f.write(" (u'\%s', 'i'),\n" % 'xee') - f.write(" (u'\%s', 'i'),\n" % 'xef') - f.write(" (u'\%s', 'I'),\n" % 'xcc') - f.write(" (u'\%s', 'I'),\n" % 'xcd') - f.write(" (u'\%s', 'I'),\n" % 'xce') - f.write(" (u'\%s', 'I'),\n" % 'xcf') - f.write(" (u'\%s', 'o'),\n" % 'xf2') - f.write(" (u'\%s', 'o'),\n" % 'xf3') - f.write(" (u'\%s', 'o'),\n" % 'xf4') - f.write(" (u'\%s', 'o'),\n" % 'xf5') - f.write(" (u'\%s', 'o'),\n" % 'xf6') - f.write(" (u'\%s', 'O'),\n" % 'xd2') - f.write(" (u'\%s', 'O'),\n" % 'xd3') - f.write(" (u'\%s', 'O'),\n" % 'xd4') - f.write(" (u'\%s', 'O'),\n" % 'xd5') - f.write(" (u'\%s', 'O'),\n" % 'xd6') - f.write(" (u'\%s', 'c'),\n" % 'xe7') - f.write(" ]\n") - f.write(" for a, b in replacements:\n") - f.write(" val = val.replace(a, b)\n") + f.write("# For more complex cases, consider using the `unidecode` library.\n") + f.write("def remove_accents(val: str) -> str:\n") f.write(" return val\n") f.write("\n\n") f.write("def keep_column_value(val, column):\n") f.write(" def keep_column_value_fun(line):\n") f.write(" if line[column] != val:\n") - f.write(" raise SkippingException(\"Column %s with wrong value %s\" % (column, val))\n") + f.write(" raise SkippingException(f\"Column {column} with wrong value {val}\")\n") f.write(" return line[column]\n") f.write(" return keep_column_value_fun\n") f.write("\n") + @check_file_exists -def create_file_clean_data(file): - """ - Create the skeleton of clean_data.py. +def create_file_clean_data(file: Path) -> None: + """Create the skeleton of clean_data.py. + + Args: + file: The path to the clean_data.py file. 
""" - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This script remove the data created by the import.\n\n") f.write("import odoolib\n") - f.write("from odoo_csv_tools.lib import conf_lib\n") + f.write("from odoo_data_flow.lib import conf_lib\n") f.write("from prefix import *\n") f.write("from files import *\n\n") f.write("connection = conf_lib.get_server_connection(config_file)\n\n") - f.write("def delete_model_data(connection, model, demo = False):\n") + f.write("def delete_model_data(connection, model, demo: bool = False) -> None:\n") f.write(" model_model = connection.get_model(model)\n") f.write(" record_ids = model_model.search([])\n") f.write(" if demo:\n") - f.write(" print 'Will remove %s records from %s' % (len(record_ids), model)\n") + f.write(" print(f'Will remove {len(record_ids)} records from {model}')\n") f.write(" else:\n") - f.write(" print 'Remove %s records from %s' % (len(record_ids), model)\n") + f.write(" print(f'Remove {len(record_ids)} records from {model}')\n") f.write(" model_model.unlink(record_ids)\n") f.write("\n\n") - f.write("def delete_xml_id(connection, model, module, demo = False):\n") + f.write("def delete_xml_id(connection, model, module, demo: bool = False) -> None:\n") f.write(" data_model = connection.get_model('ir.model.data')\n") f.write(" data_ids = data_model.search([('module', '=', module), ('model', '=', model)])\n") f.write(" records = data_model.read(data_ids, ['res_id'])\n") - f.write(" record_ids = []\n") - f.write(" for rec in records:\n") - f.write(" record_ids.append(rec['res_id'])\n") + f.write(" record_ids = [rec['res_id'] for rec in records]\n") f.write(" if demo:\n") - f.write(" print 'Will remove %s xml_id %s from %s' % (len(record_ids), module, model)\n") + f.write(" print(f'Will remove {len(record_ids)} xml_id {module} from {model}')\n") f.write(" else:\n") - f.write(" print 'Remove %s xml_id %s from %s' % (len(record_ids), module, model)\n") + f.write(" print(f'Remove {len(record_ids)} xml_id {module} from {model}')\n") f.write(" connection.get_model(model).unlink(record_ids)\n") f.write("\n\n") f.write("demo = True\n\n") @check_file_exists -def create_file_install_lang(file): - """ - Create the skeleton of install_lang.py. +def create_file_install_lang(file: Path) -> None: + """Create the skeleton of install_lang.py. + + Args: + file: The path to the install_lang.py file. """ - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("import odoolib\n") f.write("from prefix import *\n") f.write("from files import *\n") - f.write("from odoo_csv_tools.lib import conf_lib\n\n") + f.write("from odoo_data_flow.lib import conf_lib\n\n") f.write("connection = conf_lib.get_server_connection(config_file)\n\n") f.write("model_lang = connection.get_model('base.language.install')\n\n") f.write("for key in res_lang_map.keys():\n") @@ -420,18 +414,20 @@ def create_file_install_lang(file): @check_file_exists -def create_file_install_modules(file): - """ - Create the skeleton of install_modules.py. +def create_file_install_modules(file: Path) -> None: + """Create the skeleton of install_modules.py. + + Args: + file: The path to the install_modules.py file. 
""" - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("import sys\n") f.write("import odoolib\n") f.write("from prefix import *\n") f.write("from files import *\n") - f.write("from odoo_csv_tools.lib import conf_lib\n") - f.write("from odoo_csv_tools.lib.internal.rpc_thread import RpcThread\n") + f.write("from odoo_data_flow.lib import conf_lib\n") + f.write("from odoo_data_flow.lib.internal.rpc_thread import RpcThread\n") f.write("from files import config_file\n\n") f.write("connection = conf_lib.get_server_connection(config_file)\n\n") f.write("model_module = connection.get_model('ir.module.module')\n") @@ -448,18 +444,20 @@ def create_file_install_modules(file): @check_file_exists -def create_file_uninstall_modules(file): - """ - Create the skeleton of uninstall_modules.py. +def create_file_uninstall_modules(file: Path) -> None: + """Create the skeleton of uninstall_modules.py. + + Args: + file: The path to the uninstall_modules.py file. """ - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("import sys\n") f.write("import odoolib\n") f.write("from prefix import *\n") f.write("from files import *\n") - f.write("from odoo_csv_tools.lib import conf_lib\n") - f.write("from odoo_csv_tools.lib.internal.rpc_thread import RpcThread\n") + f.write("from odoo_data_flow.lib import conf_lib\n") + f.write("from odoo_data_flow.lib.internal.rpc_thread import RpcThread\n") f.write("from files import config_file\n\n") f.write("connection = conf_lib.get_server_connection(config_file)\n\n") f.write("model_module = connection.get_model('ir.module.module')\n") @@ -474,20 +472,22 @@ def create_file_uninstall_modules(file): @check_file_exists -def create_file_init_map(file): - """ - Create the skeleton of init_map.py. +def create_file_init_map(file: Path) -> None: + """Create the skeleton of init_map.py. + + Args: + file: The path to the init_map.py file. 
""" - with open(file, 'w') as f: + with file.open('w', encoding='utf-8') as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("import odoolib\n") f.write("from prefix import *\n") f.write("from files import *\n") - f.write("from odoo_csv_tools.lib import conf_lib\n") + f.write("from odoo_data_flow.lib import conf_lib\n") f.write("import json\n") f.write("import io\n\n") f.write("connection = conf_lib.get_server_connection(config_file)\n\n") - f.write("def build_map_product_category_id(filename=''):\n") + f.write("def build_map_product_category_id(filename: str = '') -> dict:\n") f.write(" # Build a dictionary {product_category : xml_id} of all existing product_category.\n") f.write(" model_data = connection.get_model('ir.model.data')\n") f.write(" model_product_category = connection.get_model('product.category')\n") @@ -500,7 +500,7 @@ def create_file_init_map(file): f.write(" val = '.'.join([data[0]['module'], data[0]['name'] ])\n") f.write(" res_map[key] = val.strip()\n") f.write(" # else:\n") - f.write(" # print 'Product category %s has no XML_ID (id: %s)' % (rec['name'], rec['id'])\n\n") + f.write(" # print(f'Product category {rec['name']} has no XML_ID (id: {rec['id']})')\n\n") f.write(" if filename:\n") f.write(" with open(filename, 'w') as fp:\n") f.write(" json.dump(res_map, fp)\n\n") @@ -508,11 +508,11 @@ def create_file_init_map(file): f.write("# Execute mapping\n") f.write("# dummy = build_map_product_category_id(work_map_product_category_id)\n\n") f.write("# Add in files.py\n") - f.write("# work_map_product_category_id = os.path.join(data_src_dir, 'work_map_product_category_id.json')\n\n") + f.write("# work_map_product_category_id = data_src_dir / 'work_map_product_category_id.json'\n\n") f.write("# Add in transformation script\n") f.write("# map_product_category_id = {}\n") - f.write("# with io.open(work_map_product_category_id, 'r') as fp:\n") - f.write("# map_product_category_id = json.load(fp, encoding='utf-8')\n\n") + f.write("# with io.open(work_map_product_category_id, 'r', encoding='utf-8') as fp:\n") + f.write("# map_product_category_id = json.load(fp)\n\n") f.write("# Add in transformation script to map 'id' column. 
        f.write("# def handle_product_category_id(line):\n")
         f.write("#     categ_name = line['Product Category']\n")
@@ -522,7 +522,7 @@ def create_file_init_map(file):
         f.write("#         categ_xml_id = mapper.m2o(PREFIX_PRODUCT_CATEGORY, 'Product Category')(line)\n")
         f.write("#     return categ_xml_id\n\n")
         f.write("##################################################################################################\n\n")
-        f.write("def build_account_map(company_id, filename=''):\n")
+        f.write("def build_account_map(company_id: int, filename: str = '') -> dict:\n")
         f.write("    # Build a dictionary {account_code : xml_id} of all existing accounts of a company.\n")
         f.write("    model_data = connection.get_model('ir.model.data')\n")
         f.write("    model_account = connection.get_model('account.account')\n")
@@ -535,7 +535,7 @@ def create_file_init_map(file):
         f.write("            val = '.'.join([data[0]['module'], data[0]['name'] ])\n")
         f.write("            res_map[key] = val.strip()\n")
         f.write("        # else:\n")
-        f.write("        #     print 'Account %s has no XML_ID' % rec['code']\n\n")
+        f.write("        #     print(f\"Account {rec['code']} has no XML_ID\")\n\n")
         f.write("    if filename:\n")
         f.write("        with open(filename, 'w') as fp:\n")
         f.write("            json.dump(res_map, fp)\n\n")
@@ -543,11 +543,11 @@ def create_file_init_map(file):
         f.write("    return res_map\n\n")
         f.write("# Execute mapping\n")
         f.write("# dummy = build_account_map(1, work_map_account_code_id)\n\n")
         f.write("# Add in files.py\n")
-        f.write("# work_map_account_code_id = os.path.join(data_src_dir, 'work_map_account_code_id.json')\n\n")
+        f.write("# work_map_account_code_id = data_src_dir / 'work_map_account_code_id.json'\n\n")
         f.write("# Add in transformation script\n")
         f.write("# map_account_code_id = {}\n")
-        f.write("# with io.open(work_map_account_code_id, 'r') as fp:\n")
-        f.write("#     map_account_code_id = json.load(fp, encoding='utf-8')\n\n")
+        f.write("# with io.open(work_map_account_code_id, 'r', encoding='utf-8') as fp:\n")
+        f.write("#     map_account_code_id = json.load(fp)\n\n")
         f.write("# Add in transformation script to map 'id' column. REVIEW COLUNM NAME and PREFIX\n")
         f.write("# def handle_account_account_id_map(line):\n")
         f.write("#     code = line['Accounts']\n")
@@ -559,63 +559,63 @@ def create_file_init_map(file):
         f.write("##################################################################################################\n\n")
 
 
-def scaffold_dir():
-    """
-    Create the whole directory structure and the basic project files.
- """ - create_folder(conf_dir) - create_folder(orig_dir) - create_folder(orig_raw_dir) - create_folder(data_dir) - create_folder(log_dir) - - create_connection_file_local(os.path.join(conf_dir, 'connection.conf')) - create_connection_file_local(os.path.join(conf_dir, 'connection.local')) - create_connection_file_remote(os.path.join(conf_dir, 'connection.staging'), '.dev.odoo.com') - create_connection_file_remote(os.path.join(conf_dir, 'connection.master'), '.odoo.com') - - create_cleanup_script(os.path.join(base_dir, '%s%s' % ('cleanup_data_dir', script_extension))) - create_transform_script(os.path.join(base_dir, '%s%s' % ('transform', script_extension))) - create_load_script(os.path.join(base_dir, '%s%s' % ('load', script_extension))) - create_file_prefix(os.path.join(base_dir, 'prefix.py')) - create_file_mapping(os.path.join(base_dir, 'mapping.py')) - create_file_files(os.path.join(base_dir, 'files.py')) - create_file_lib(os.path.join(base_dir, 'funclib.py')) - create_file_clean_data(os.path.join(base_dir, 'clean_data.py')) - create_file_install_lang(os.path.join(base_dir, 'install_lang.py')) - create_file_install_modules(os.path.join(base_dir, 'install_modules.py')) - create_file_uninstall_modules(os.path.join(base_dir, 'uninstall_modules.py')) - create_file_init_map(os.path.join(base_dir, 'init_map.py')) - - sys.stdout.write("Project created in %s\n" % os.path.abspath(base_dir)) +def scaffold_dir() -> None: + """Create the whole directory structure and the basic project files.""" + create_folder(Path(conf_dir)) + create_folder(Path(orig_dir)) + create_folder(Path(orig_raw_dir)) + create_folder(Path(data_dir)) + create_folder(Path(log_dir)) + + create_connection_file_local(Path(conf_dir) / 'connection.conf') + create_connection_file_local(Path(conf_dir) / 'connection.local') + create_connection_file_remote(Path(conf_dir) / 'connection.staging', '.dev.odoo.com') + create_connection_file_remote(Path(conf_dir) / 'connection.master', '.odoo.com') + + create_cleanup_script(Path(base_dir) / f'cleanup_data_dir{script_extension}') + create_transform_script(Path(base_dir) / f'transform{script_extension}') + create_load_script(Path(base_dir) / f'load{script_extension}') + create_file_prefix(Path(base_dir) / 'prefix.py') + create_file_mapping(Path(base_dir) / 'mapping.py') + create_file_files(Path(base_dir) / 'files.py') + create_file_lib(Path(base_dir) / 'funclib.py') + create_file_clean_data(Path(base_dir) / 'clean_data.py') + create_file_install_lang(Path(base_dir) / 'install_lang.py') + create_file_install_modules(Path(base_dir) / 'install_modules.py') + create_file_uninstall_modules(Path(base_dir) / 'uninstall_modules.py') + create_file_init_map(Path(base_dir) / 'init_map.py') + + sys.stdout.write(f"Project created in {Path(base_dir).resolve()}\n") ############################################################################## # FUNCTIONS FOR MODEL SKELETON CODE ############################################################################## class ModelField: + """Manages how to get a suited mapper function and how to document itself. + + Args: + connection: The Odoo connection object. + properties: A dictionary of field properties. 
""" - - manage how to get a suited mapper function (get_mapper_command) - - manage how to document itself (get_info) - """ - def __init__(self, connection, properties): + def __init__(self, connection: Any, properties: Dict[str, Any]) -> None: self.connection = connection self.properties = properties - self.import_warn_msg = [] - self.id = properties.get('id') - self.name = properties.get('name') - self.type = properties.get('ttype') - self.required = properties.get('required') - self.readonly = properties.get('readonly') - self.string = properties.get('field_description') - self.store = properties.get('store') - self.track_visibility = properties.get('track_visibility') - self.related = properties.get('related') - self.relation = properties.get('relation') - self.depends = properties.get('depends') - self.compute = self.__get_compute() - self.selection = self.__get_selection() - self.default_value = self.__get_default() + self.import_warn_msg: List[str] = [] + self.id: int = properties.get('id') + self.name: str = properties.get('name') + self.type: str = properties.get('ttype') + self.required: bool = properties.get('required') + self.readonly: bool = properties.get('readonly') + self.string: str = properties.get('field_description') + self.store: bool = properties.get('store') + self.track_visibility: str = properties.get('track_visibility') + self.related: str = properties.get('related') + self.relation: str = properties.get('relation') + self.depends: str = properties.get('depends') + self.compute: List[str] = self._get_compute() + self.selection: List[str] = self._get_selection() + self.default_value: List[str] = self._get_default() # Reasons avoiding to import a field -> commented by get_info if self.related and self.store: @@ -625,161 +625,152 @@ def __init__(self, connection, properties): if len(self.compute) > 1: self.import_warn_msg.append('computed') + def _get_selection(self) -> List[str]: + """Fetch the selection values of a field. - def __get_selection(self): - """ - Fetch the selection values of a field. - Return a list of strings "'technical_value': 'visible_value'" - or an emplty list if no selection exists. + Returns: + A list of strings "'technical_value': 'visible_value'" + or an empty list if no selection exists. """ - l = [] + selection_list = [] model_model = self.connection.get_model(model) try: vals = model_model.fields_get([self.name]).get(self.name).get('selection') if not vals: - return l + return selection_list for sel in vals: - l.append("'%s'%s%s" % (sel[0], selection_sep, sel[1])) - except: + selection_list.append(f"'{sel[0]}'{selection_sep}{sel[1]}") + except Exception: pass - return l + return selection_list - def __get_default(self): - """ - Fetch the default value of a field. - Return a list of strings (because the default value can be a multiline value) - or an emplty list if no default value exists. + def _get_default(self) -> List[str]: + """Fetch the default value of a field. + + Returns: + A list of strings (because the default value can be a multiline value) + or an empty list if no default value exists. 
""" model_model = self.connection.get_model(model) val = model_model.default_get([self.name]).get(self.name, '') - l = [] + lines = str(val).splitlines() - try: - val = str(val) - except: - pass - - for line in val.splitlines(): - l.append(line) + if maxdescr > -1 and len(lines) > max(1, maxdescr): + lines = lines[:maxdescr] + ['[truncated...]'] - if maxdescr > -1 and len(l) > max(1, maxdescr): - l = l[:maxdescr] + ['[truncated...]'] + return lines - return l + def _get_compute(self) -> List[str]: + """Fetch the compute method of a field. - def __get_compute(self): - """ - Fetch the compute method of a field. - Return a list of strings (because the compute method is often a multiline value) - or an emplty list if no compute method exists. + Returns: + A list of strings (because the compute method is often a multiline value) + or an empty list if no compute method exists. """ val = self.properties.get('compute') - l = [] + lines = str(val).splitlines() - val = str(val) - for line in val.splitlines(): - l.append(line) + if maxdescr > -1 and len(lines) > maxdescr: + lines = lines[:maxdescr] + ['[truncated...]'] - if maxdescr > -1 and len(l) > maxdescr: - l = l[:maxdescr] + ['[truncated...]'] + return lines - return l + def get_info(self) -> str: + """Build the complete block of text describing a field. - def get_info(self): + Returns: + A string containing the field information. """ - Build the complete block of text describing a field. - """ - self.info = "%s (#%s): %s%s%s%s%s," % ( self.string, self.id, - 'stored' if self.store else 'non stored', - ', required' if self.required else ', optional', - ', readonly' if self.readonly else '', - ', track_visibility (%s)' % self.track_visibility if self.track_visibility else '', - ', related (=%s)' % self.related if self.related else '', - ) - - self.info = "%s %s" % (self.info, self.type) + info = f"{self.string} (#{self.id}): {'stored' if self.store else 'non stored'}" + info += f", {'required' if self.required else 'optional'}" + if self.readonly: + info += ", readonly" + if self.track_visibility: + info += f", track_visibility ({self.track_visibility})" + if self.related: + info += f", related (={self.related})" + info += f", {self.type}" if self.relation: - self.info = "%s -> %s" % (self.info, self.relation) + info += f" -> {self.relation}" # Add XMLID summary in field info model_data = self.connection.get_model('ir.model.data') - external_prefixes = model_data.read_group([('model', '=', self.relation)], ['module'], ['module']) + external_prefixes = model_data.read_group( + [('model', '=', self.relation)], ['module'], ['module'] + ) if external_prefixes: - self.info = "%s - with xml_id in module(s):" % self.info + info += " - with xml_id in module(s):" for data in external_prefixes: - self.info = '%s %s(%s)' % (self.info, data['module'], data['module_count']) + info += f" {data['module']}({data['module_count']})" if self.selection: - self.info = "%s\n # SELECTION: %s" % (self.info, ', '.join(self.selection)) + info += f"\n # SELECTION: {', '.join(self.selection)}" if self.default_value: if len(self.default_value) == 1: - self.info = '%s\n # DEFAULT: %s' % (self.info, self.default_value[0]) + info += f"\n # DEFAULT: {self.default_value[0]}" else: - self.info = '%s\n # DEFAULT: \n # %s' % (self.info, '\n # '.join(self.default_value)) + info += f"\n # DEFAULT: \n # {'\n # '.join(self.default_value)}" if len(self.compute) > 1: - self.info = '%s\n # COMPUTE: depends on %s\n # %s' % (self.info, self.depends, '\n # '.join(self.compute)) + info += 
f"\n # COMPUTE: depends on {self.depends}\n # {'\n # '.join(self.compute)}" if self.import_warn_msg: - self.info = "%s\n%s %s" % (self.info, ' # AVOID THIS FIELD:', ', '.join(self.import_warn_msg)) - - if sys.version_info >= (3, 0, 0): - return self.info - else: - return u''.join((self.info)).encode('utf-8') + info += f"\n # AVOID THIS FIELD: {', '.join(self.import_warn_msg)}" - def get_name(self): - return self.name if fieldname=='tech' else self.string + return info - def get_mapping_name(self): - """ - Return the field name as needed in the import file. - """ - return '/'.join((self.name, 'id')) if self.type in ('many2one', 'many2many') else self.name - - def get_mapper_command(self): - """ - Return a suited mapper function according to the field properties and skeleton options. - """ + def get_name(self) -> str: + """Returns the technical or user-friendly name of the field.""" + return self.name if fieldname == 'tech' else self.string + + def get_mapping_name(self) -> str: + """Return the field name as needed in the import file.""" + return f"{self.name}/id" if self.type in ('many2one', 'many2many') else self.name + + def get_mapper_command(self) -> str: + """Return a suited mapper function according to the field properties and skeleton options.""" if self.name == 'id': if wxmlid: - return "mapper.val('%s')" % self.name + return f"mapper.val('{self.name}')" else: return "mapper.m2o_map(OBJECT_XMLID_PREFIX, mapper.concat('_', 'CSV_COLUMN1','CSV_COLUMN2'))" - + elif self.type in ('integer', 'float', 'monetary'): - return "mapper.num('%s')" % self.get_name() + return f"mapper.num('{self.get_name()}')" elif self.type in ('boolean'): - return "mapper.bool_val('%s', true_vals=true_values, false_vals=false_values)" % self.get_name() + return f"mapper.bool_val('{self.get_name()}', true_vals=true_values, false_vals=false_values)" elif self.type in ('datetime'): - return "mapper.val('%s', postprocess=lambda x: datetime.strptime(x, 'CSV_DATE_FORMAT').strftime('%%Y-%%m-%%d 00:00:00'))" % self.get_name() + return f"mapper.val('{self.get_name()}', postprocess=lambda x: datetime.strptime(x, 'CSV_DATE_FORMAT').strftime('%%Y-%%m-%%d 00:00:00'))" elif self.type in ('binary'): - return "mapper.binary('%s', data_raw_dir)" % self.get_name() + return f"mapper.binary('{self.get_name()}', data_raw_dir)" elif self.type in ('selection'): if mapsel: - return "mapper.map_val('%s', %s_%s_map)" % (self.get_name(), model_mapped_name, self.name) + return f"mapper.map_val('{self.get_name()}', {model_mapped_name}_{self.name}_map)" else: - return "mapper.val('%s')" % self.get_name() + return f"mapper.val('{self.get_name()}')" elif self.type in ('many2many') and not wxmlid: - return "mapper.m2m(PREFIX_%s, '%s')" % (self.relation.replace('.', '_').upper(), self.get_name()) + return f"mapper.m2m(PREFIX_{self.relation.replace('.', '_').upper()}, '{self.get_name()}')" elif self.type in ('many2one', 'one2many', 'many2many') and not wxmlid: - return "mapper.m2o(PREFIX_%s, '%s')" % (self.relation.replace('.', '_').upper(), self.get_name()) + return f"mapper.m2o(PREFIX_{self.relation.replace('.', '_').upper()}, '{self.get_name()}')" else: - return "mapper.val('%s')" % self.get_name() - - def is_required(self): - return self.required and len(self.default_value) == 0 + return f"mapper.val('{self.get_name()}')" + def is_required(self) -> bool: + """Return True if the field is required and has no default value.""" + return self.required and not self.default_value -def load_fields(): - """ - Build the model fields list, fetched as 
+
+    Returns:
+        A list of ModelField objects.
     """
     global has_tracked_fields
     global has_computed_fields
-    has_tracked_fields, has_computed_fields = False, False    
+    has_tracked_fields, has_computed_fields = False, False
 
     connection = conf_lib.get_server_connection(config)
     model_fields = connection.get_model('ir.model.fields')
@@ -788,20 +779,22 @@ def load_fields():
     ret = []
     for field in fields:
         f = ModelField(connection, field)
-        has_tracked_fields = has_tracked_fields or f.track_visibility
+        has_tracked_fields = has_tracked_fields or bool(f.track_visibility)
         has_computed_fields = has_computed_fields or len(f.compute) > 1
         ret.append(f)
 
     return ret
-    
 
-def write_begin(file):
-    """
-    Write the beginning of the generated python script.
+
+def write_begin(file: io.TextIOWrapper) -> None:
+    """Write the beginning of the generated python script.
+
+    Args:
+        file: The file object to write to.
     """
     file.write("# -*- coding: utf-8 -*-\n")
     file.write("\n")
-    file.write("from odoo_csv_tools.lib import mapper\n")
-    file.write("from odoo_csv_tools.lib.transform import Processor\n")
+    file.write("from odoo_data_flow.lib import mapper\n")
+    file.write("from odoo_data_flow.lib.transform import Processor\n")
     file.write("from prefix import *\n")
    file.write("from mapping import *\n")
     file.write("from files import *\n")
@@ -810,10 +803,10 @@ def write_begin(file):
     file.write("\n")
     file.write("# Needed for RPC calls\n")
     file.write("# import odoolib\n")
-    file.write("# from odoo_csv_tools.lib import conf_lib\n")
+    file.write("# from odoo_data_flow.lib import conf_lib\n")
     file.write("# connection = conf_lib.get_server_connection(config_file)\n")
     file.write("\n")
-    file.write("def preprocess_%s(header, data):\n" % model_class_name)
+    file.write(f"def preprocess_{model_class_name}(header, data):\n")
     file.write("    # Do nothing\n")
     file.write("    return header, data\n")
     file.write("    #\n")
@@ -830,13 +822,15 @@ def write_begin(file):
     file.write("    #     data_new.append(j)\n")
     file.write("    # return header, data_new\n")
     file.write("\n")
-    file.write("processor = Processor(src_%s, delimiter='%s', preprocess=preprocess_%s)\n" % (model_mapped_name, csv_delimiter, model_class_name))
+    file.write(f"processor = Processor(src_{model_mapped_name}, delimiter='{csv_delimiter}', preprocess=preprocess_{model_class_name})\n")
     file.write("\n")
 
 
-def write_end(file):
-    """
-    Write the end of the generated python script.
+def write_end(file: io.TextIOWrapper) -> None:
+    """Write the end of the generated python script.
+
+    Args:
+        file: The file object to write to.
""" ctx = '' ctx_opt = [] @@ -849,88 +843,93 @@ def write_end(file): if wmetadata: ctx_opt.append("'write_metadata': True") - if len(ctx_opt): - ctx = "'context': \"{%s}\", " % ', '.join(ctx_opt) + if ctx_opt: + ctx = f"'context': {{{', '.join(ctx_opt)}}}, " - # file.write("processor.process(%s, dest_%s, {'model': '%s', %s'groupby': '', 'ignore': '', 'worker': %s, 'batch_size': %s}, 'set', verbose=False)\n\n" % (model_mapping_name, model_mapped_name, model, ctx, default_worker, default_batch_size)) - file.write("processor.process(%s, dest_%s, {'model': '%s', %s'groupby': '', 'worker': DEFAULT_WORKER, 'batch_size': DEFAULT_BATCH_SIZE}, 'set', verbose=False)\n\n" % (model_mapping_name, model_mapped_name, model, ctx)) - file.write("processor.write_to_file('%s%s', python_exe='%s', path='%s')\n\n" % (model_mapped_name, script_extension, default_python_exe, default_path)) + file.write(f"processor.process({model_mapping_name}, dest_{model_mapped_name}, {{'model': '{model}', {ctx}'groupby': '', 'worker': DEFAULT_WORKER, 'batch_size': DEFAULT_BATCH_SIZE}}, 'set', verbose=False)\n\n") + file.write(f"processor.write_to_file('{model_mapped_name}{script_extension}', python_exe='{default_python_exe}', path='{default_path}')\n\n") -def write_mapping(file): - """ - Write the fields mapping of the generated python script. +def write_mapping(file: io.TextIOWrapper) -> None: + """Write the fields mapping of the generated python script. + + Args: + file: The file object to write to. """ if not dbname or offline: - file.write("%s = {\n 'id': ,\n}\n\n" % model_mapping_name) + file.write(f"{model_mapping_name} = {{\n 'id': None,\n}}\n\n") return fields = load_fields() - filtering = "lambda f: f.name != '__last_update'" - if wstored: - filtering = "%s and f.store" % filtering - if not wo2m: - filtering = "%s and f.type != 'one2many'" % filtering - if not wmetadata: - filtering = "%s and f.name not in ('create_uid', 'write_uid', 'create_date', 'write_date', 'active')" % filtering - fields = filter(eval(filtering), fields) + + def field_filter(f: ModelField) -> bool: + if f.name == '__last_update': + return False + if wstored and not f.store: + return False + if not wo2m and f.type == 'one2many': + return False + if not wmetadata and f.name in ('create_uid', 'write_uid', 'create_date', 'write_date', 'active'): + return False + return True + + fields = filter(field_filter, fields) fields = sorted(fields, key=lambda f: ((f.name != 'id'), not f.is_required(), f.name)) - + if skeleton == 'dict': - file.write('%s = {\n' % model_mapping_name) + file.write(f'{model_mapping_name} = {{\n') for f in fields: - if verbose: sys.stdout.write('Write field %s\n' % f.name) + if verbose: + sys.stdout.write(f'Write field {f.name}\n') line_start = '# ' if (required and not f.is_required() and f.name != 'id') or f.import_warn_msg else '' - file.write (" # %s\n" % f.get_info()) - file.write(" %s'%s': %s,\n" % (line_start,f.get_mapping_name(), f.get_mapper_command().replace('OBJECT_XMLID_PREFIX', 'PREFIX_%s' % model_mapped_name.upper()))) + file.write(f" # {f.get_info()}\n") + mapper_command = f.get_mapper_command().replace('OBJECT_XMLID_PREFIX', f'PREFIX_{model_mapped_name.upper()}') + file.write(f" {line_start}'{f.get_mapping_name()}': {mapper_command},\n") file.write('}\n\n') - + elif skeleton == 'map': - function_prefix = 'handle_%s_' % model_mapped_name + function_prefix = f'handle_{model_mapped_name}_' for f in fields: - if verbose: sys.stdout.write('Write map function of field %s\n' % f.name) + if verbose: + 
                sys.stdout.write(f'Write map function of field {f.name}\n')
             line_start = '# ' if (required and not f.is_required()) or f.import_warn_msg else ''
-            file.write ("%sdef %s%s(line):\n" % (line_start, function_prefix, f.name))
-            file.write ("%s    return %s(line)\n\n" % (line_start,f.get_mapper_command().replace('OBJECT_XMLID_PREFIX', 'PREFIX_%s' % model_mapped_name.upper())))
-
-        file.write('%s = {\n' % model_mapping_name)
+            mapper_command = f.get_mapper_command().replace('OBJECT_XMLID_PREFIX', f'PREFIX_{model_mapped_name.upper()}')
+            file.write(f"{line_start}def {function_prefix}{f.name}(line):\n")
+            file.write(f"{line_start}    return {mapper_command}(line)\n\n")
+
+        file.write(f'{model_mapping_name} = {{\n')
         for f in fields:
-            if verbose: sys.stdout.write('Write field %s\n' % f.name)
+            if verbose:
+                sys.stdout.write(f'Write field {f.name}\n')
             line_start = '# ' if (required and not f.is_required() and f.name != 'id') or f.import_warn_msg else ''
-            file.write ("    # %s\n" % f.get_info())
-            file.write ("    %s'%s': %s,\n" % (line_start,f.get_mapping_name(), '%s%s' % (function_prefix, f.name)))
+            file.write(f"    # {f.get_info()}\n")
+            file.write(f"    {line_start}'{f.get_mapping_name()}': {function_prefix}{f.name},\n")
         file.write('}\n\n')
 
     # Add selection dictionaries if --map-selection
     if mapsel:
-        if verbose: sys.stdout.write('Write mapping of selection fields\n')
-        if sys.version_info >= (3, 0, 0):
-            with open(os.path.join(base_dir, 'mapping.py'), 'a') as pf:
-                pf.write("# Selection fields in model %s\n\n" % model)
-                for f in filter(lambda x: x.type == 'selection', fields):
-                    sys.stdout.write('Write mapping of selection field %s\n' % f.name)
-                    line_start = '# ' if (required and not f.is_required()) else ''
-                    pf.write("%s%s_%s_map = {\n" % (line_start, model_mapped_name, f.name))
-                    for sel in f.selection:
-                        key, val = sel.split(selection_sep)
-                        pf.write('%s    "%s": %s,\n' % (line_start, val.strip(), key.strip()))
-                    pf.write("%s}\n\n" % line_start)
-        else:
-            with io.open(os.path.join(base_dir, 'mapping.py'), 'a', encoding='utf-8') as pf:
-                pf.write("# Selection fields in model %s\n\n" % unicode(model, 'utf-8'))
-                for f in filter(lambda x: x.type == 'selection', fields):
-                    sys.stdout.write('Write mapping of selection field %s\n' % f.name)
-                    line_start = '# ' if (required and not f.is_required()) else ''
-                    pf.write("%s%s_%s_map = {\n" % (line_start, model_mapped_name, f.name))
-                    for sel in f.selection:
-                        key, val = sel.split(selection_sep)
-                        pf.write('%s    "%s": %s,\n' % (line_start, val.strip(), key.strip()))
-                    pf.write("%s}\n\n" % unicode(line_start, 'utf-8'))
+        if verbose:
+            sys.stdout.write('Write mapping of selection fields\n')
+        with open(Path(base_dir) / 'mapping.py', 'a', encoding='utf-8') as pf:
+            pf.write(f"# Selection fields in model {model}\n\n")
+            for f in filter(lambda x: x.type == 'selection', fields):
+                sys.stdout.write(f'Write mapping of selection field {f.name}\n')
+                line_start = '# ' if (required and not f.is_required()) else ''
+                pf.write(f"{line_start}{model_mapped_name}_{f.name}_map = {{\n")
+                for sel in f.selection:
+                    key, val = sel.split(selection_sep, 1)
+                    pf.write(f'{line_start}    "{val.strip()}": {key.strip()},\n')
+                pf.write(f"{line_start}}}\n\n")
 
 
-def model_exists(model):
-    """
-    Return True if 'model' is scaffoldable.
+def model_exists(model: str) -> bool:
+    """Return True if 'model' is scaffoldable.
+
+    Args:
+        model: The model name to check.
+
+    Returns:
+        True if the model exists, False otherwise.
""" connection = conf_lib.get_server_connection(config) model_model = connection.get_model('ir.model') @@ -938,113 +937,106 @@ def model_exists(model): return res != 0 -def scaffold_model(): - """ - Create the python script. - """ +def scaffold_model() -> None: + """Create the python script.""" global offline global dbname global host - if sys.version_info >= (3, 0, 0): - import configparser as ConfigParser - else: - import ConfigParser - cfg = ConfigParser.RawConfigParser({'protocol': 'xmlrpc', 'port': 8069}) + import configparser + cfg = configparser.ConfigParser(defaults={'protocol': 'xmlrpc', 'port': 8069}) cfg.read(config) host = cfg.get('Connection', 'hostname') dbname = cfg.get('Connection', 'database') login = cfg.get('Connection', 'login') uid = cfg.get('Connection', 'uid') - sys.stdout.write("Using connection file: %s (db: %s, host: %s, login: %s, uid: %s)\n" % (config, dbname, host, login, uid)) + sys.stdout.write(f"Using connection file: {config} (db: {dbname}, host: {host}, login: {login}, uid: {uid})\n") if not dbname: offline = True elif not model_exists(model): - sys.stderr.write("Model %s not found\n" % model) + sys.stderr.write(f"Model {model} not found\n") return - + do_file = skeleton or offline + outfile_path = Path(outfile) - if os.path.isfile(outfile): + if outfile_path.is_file(): if force: - if verbose: sys.stdout.write("Output file %s already exists and will be overwritten.\n" % outfile) + if verbose: + sys.stdout.write(f"Output file {outfile_path} already exists and will be overwritten.\n") else: - sys.stderr.write("The file %s already exists.\n" % outfile) + sys.stderr.write(f"The file {outfile_path} already exists.\n") do_file = False if do_file: # Write the file - with open(outfile, 'w') as of: + with outfile_path.open('w', encoding='utf-8') as of: write_begin(of) write_mapping(of) write_end(of) if offline: - sys.stdout.write("Minimal skeleton code generated in %s%s\n" % (outfile, ' because no database is defined' if not dbname else '')) + sys.stdout.write(f"Minimal skeleton code generated in {outfile_path}{' because no database is defined' if not dbname else ''}\n") else: - sys.stdout.write("Skeleton code generated in %s\n" % outfile) + sys.stdout.write(f"Skeleton code generated in {outfile_path}\n") else: sys.stdout.write("Skeleton code not generated. 
Use option -k|--skeleton or -n|--offline or -f|--force to generate the python script.\n") - dirname = os.path.dirname(outfile) + dirname = outfile_path.parent if append: #Add command to transform script - script = os.path.join(dirname, 'transform%s' % script_extension) + script = dirname / f'transform{script_extension}' if platform.system() == 'Windows': - line = "echo Transform %s\npython %s.py > %s\\transform_%s_out.log 2> %s\\transform_%s_err.log\n" % (model_mapped_name, model_mapped_name, '%LOGDIR%', model_mapped_name, '%LOGDIR%', model_mapped_name) + line = f"echo Transform {model_mapped_name}\npython {model_mapped_name}.py > %LOGDIR%\\transform_{model_mapped_name}_out.log 2> %LOGDIR%\\transform_{model_mapped_name}_err.log\n" else: - os.system('sed -i "$ d" %s' % script) - line = 'load_script %s\nchmod +x *.sh\n' % model_mapped_name - with open (script, 'a') as f: + os.system(f'sed -i "$ d" {script}') + line = f'load_script {model_mapped_name}\nchmod +x *.sh\n' + with script.open('a', encoding='utf-8') as f: f.write(line) - sys.stdout.write('Script %s.py added in %s\n' % (model_mapped_name, script)) + sys.stdout.write(f'Script {model_mapped_name}.py added in {script}\n') #Add command to load script - script = os.path.join(dirname, 'load%s' % script_extension) + script = dirname / f'load{script_extension}' if platform.system() == 'Windows': - line = "echo Load %s\ncall %s%s > %s\\load_%s_out.log 2> %s\\load_%s_err.log\n" % (model_mapped_name, model_mapped_name, script_extension, '%LOGDIR%', model_mapped_name, '%LOGDIR%', model_mapped_name) + line = f"echo Load {model_mapped_name}\ncall {model_mapped_name}{script_extension} > %LOGDIR%\\load_{model_mapped_name}_out.log 2> %LOGDIR%\\load_{model_mapped_name}_err.log\n" else: - line = 'load_script %s\n' % (model_mapped_name) - with open (script, 'a') as f: + line = f'load_script {model_mapped_name}\n' + with script.open('a', encoding='utf-8') as f: f.write(line) - sys.stdout.write('Script %s%s added in %s\n' % (model, script_extension, script)) + sys.stdout.write(f'Script {model}{script_extension} added in {script}\n') # Add model to prefix.py if not wxmlid: - script = os.path.join(dirname, 'prefix.py') - line = "PREFIX_%s = '%s_%s' %s project_name\n" % (model_mapped_name.upper(), '%s', model_mapped_name, '%') - with open (script, 'a') as f: + script = dirname / 'prefix.py' + line = f"PREFIX_{model_mapped_name.upper()} = f'{{project_name}}_{model_mapped_name}'\n" + with script.open('a', encoding='utf-8') as f: f.write(line) - sys.stdout.write('Prefix PREFIX_%s added in %s\n' % (model_mapped_name.upper(), script)) + sys.stdout.write(f'Prefix PREFIX_{model_mapped_name.upper()} added in {script}\n') else: - if verbose: sys.stdout.write('XML_ID prefix not added because of option --with-xmlid\n') - + if verbose: + sys.stdout.write('XML_ID prefix not added because of option --with-xmlid\n') + # Add model to files.py - script = os.path.join(dirname, 'files.py') - with open (script, 'a') as f: - f.write("# Model %s\n" % model) - f.write("src_%s = os.path.join(data_src_dir, '%s.csv')\n" % (model_mapped_name , model_mapped_name)) - f.write("dest_%s = os.path.join(data_dest_dir, '%s.csv')\n" % (model_mapped_name , model)) - sys.stdout.write('%s files added in %s\n' % (model, script)) + script = dirname / 'files.py' + with script.open('a', encoding='utf-8') as f: + f.write(f"# Model {model}\n") + f.write(f"src_{model_mapped_name} = data_src_dir / '{model_mapped_name}.csv'\n") + f.write(f"dest_{model_mapped_name} = data_dest_dir / '{model}.csv'\n") + 
        sys.stdout.write(f'{model} files added in {script}\n')
 
         # Add model to clean_data.py
-        script = os.path.join(dirname, 'clean_data.py')
-        # line = "PREFIX_%s = '%s_%s' %s project_name\n" % (model_mapped_name.upper(), '%s', model_mapped_name, '%')
-        # line = "delete_xml_id(connection, '%s', '%s_%s' %s (demo, project_name))\n" % (model, '%s', model_mapped_name, '%')
-        with open(script, 'a') as f:
-            f.write("delete_xml_id(connection, '%s', '%s_%s' %s project_name, demo)\n" % (model, '%s', model_mapped_name, '%'))
-        sys.stdout.write('Model %s added in %s\n' % (model, script))
+        script = dirname / 'clean_data.py'
+        with script.open('a', encoding='utf-8') as f:
+            f.write(f"delete_xml_id(connection, '{model}', f'{{project_name}}_{model_mapped_name}', demo)\n")
+        sys.stdout.write(f'Model {model} added in {script}\n')
     else:
-        sys.stdout.write("You should probably add this model in files.py, prefix.py, clean_data.py, transform%s and load%s with -a|--append\n" % (script_extension, script_extension))
-
+        sys.stdout.write(f"You should probably add this model in files.py, prefix.py, clean_data.py, transform{script_extension} and load{script_extension} with -a|--append\n")
 
-##############################################################################
-# OTHER ACTIONS
-##############################################################################
 
-def list_models():
+def list_models() -> None:
+    """List installed models in the target Odoo instance."""
     connection = conf_lib.get_server_connection(config)
 
     model_model = connection.get_model('ir.model')
@@ -1055,59 +1047,63 @@ def list_models():
         return
 
     for m in sorted(models, key=lambda f: f['model']):
-        sys.stdout.write('%s (%s)\n' % (m['model'], m['name']))
+        sys.stdout.write(f"{m['model']} ({m['name']})\n")
 
 
-def show_version():
-    sys.stdout.write("%s %s\n" % (module_name, module_version))
+def show_version() -> None:
+    """Show the version of the script."""
+    sys.stdout.write(f"{module_name} {module_version}\n")
 
 
 ##############################################################################
 # MAIN
 ##############################################################################
 
-if __name__ == '__main__':
-    module_name = os.path.basename(sys.argv[0])
-    # f = is_remote_host('pcodoo')
+def main() -> None:
+    """Main function."""
+    global module_name, conf_dir_name, orig_dir_name, data_dir_name, log_dir_name, selection_sep, default_base_dir
+    global scaffold, base_dir, dbname, host, model, userid, config, outfile, required, skeleton, wstored, wo2m, wmetadata, mapsel, wxmlid, maxdescr, offline, append, list, force, verbose, version, fieldname
+    global script_extension, project_name, model_mapped_name, model_class_name, model_mapping_name, csv_delimiter, default_python_exe, default_path
+
+    module_name = Path(sys.argv[0]).name
     conf_dir_name = 'conf'
     orig_dir_name = 'origin'
-    orig_raw_dir_name = os.path.join(orig_dir_name, 'binary')
     data_dir_name = 'data'
     log_dir_name = 'log'
     selection_sep = ': '
-    default_base_dir = os.path.join('.','')
+    default_base_dir = Path('.')
 
-    module_descr = """Version: %s
+    module_descr = f"""Version: {module_version}
     Create the structure of an import project and model skeleton codes working
-    with odoo_csv_tools (https://github.com/tfrancoi/odoo_csv_import).
+    with odoo_data_flow (https://github.com/OdooDataFlow/odoo-data-flow).
Functionalities: ---------------- - Create the project structure: - %s -s -p PATH [-d DBNAME] [-t HOST] [-u USERID] [-f] [-v] + {module_name} -s -p PATH [-d DBNAME] [-t HOST] [-u USERID] [-f] [-v] - Skeleton a model: - %s -m MODEL [-a] [--map-selection] [--with-xmlid] [-r] [-k map | -n] + {module_name} -m MODEL [-a] [--map-selection] [--with-xmlid] [-r] [-k map | -n] [--with-one2many] [--with-metadata] [--stored] [-v] [--max-descr MAXDESCR] [-f] [-o OUTFILE] [-c CONFIG] - Show available models: - %s -l [-c CONFIG] - """ % (module_version, module_name, module_name, module_name) + {module_name} -l [-c CONFIG] + """ module_epilog = """ - More information on https://github.com/jad-odoo/odoo_import_scaffold + More information on https://github.com/OdooDataFlow/odoo_dataflow_scaffold """ - + parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=module_descr, epilog=module_epilog) parser.add_argument('-s', '--scaffold', dest='scaffold', action='store_true', help='create the folders structure and the basic project files') - parser.add_argument('-p', '--path', dest='path', default=default_base_dir, required=False, help='project path (default: current dir)') + parser.add_argument('-p', '--path', dest='path', type=Path, default=default_base_dir, required=False, help='project path (default: current dir)') parser.add_argument('-d', '--db', dest='dbname', default='', required=False, help='target database. If omitted, it is the first part of HOST') parser.add_argument('-t', '--host', dest='host', default='localhost', required=False, help='hostname of the database (default: localhost)') parser.add_argument('-u', '--userid', dest='userid', type=int, default=2, required=False, help='user id of RPC calls (default: 2)') parser.add_argument('-m', '--model', dest='model', required=False, help='technical name of the model to skeleton (ex: res.partner)') - parser.add_argument('-c', '--config', dest='config', default=os.path.join(conf_dir_name,'connection.conf'), required=False, help='configuration file (relative to --path) defining the RPC connections parameters (default: %s)' % os.path.join(conf_dir_name, 'connection.conf')) - parser.add_argument('-o', '--outfile', dest='outfile', required=False, help='python script of the model skeleton code (default: model name with dots replaced by underscores)') + parser.add_argument('-c', '--config', dest='config', type=Path, default=Path(conf_dir_name) / 'connection.conf', required=False, help=f'configuration file (relative to --path) defining the RPC connections parameters (default: {Path(conf_dir_name) / "connection.conf"})') + parser.add_argument('-o', '--outfile', dest='outfile', type=Path, required=False, help='python script of the model skeleton code (default: model name with dots replaced by underscores)') parser.add_argument('-k', '--skeleton', dest='skeleton', choices=['dict','map'], default='dict', required = False, help='skeleton code type. dict: generate mapping as a simple dictionary. map: create the same dictionary with map functions for each field (default: dict)') parser.add_argument('-r', '--required', dest='required', action='store_true', help='keep only the required fields without default value (comment the optional fields') parser.add_argument('--field-name', dest='fieldname', choices=['tech','user'], default='user', required = False, help='Field name in import file. tech=technical name, user=User name (default: user). 
Generates the mapping accordingly.') @@ -1116,14 +1112,14 @@ def show_version(): parser.add_argument('--with-metadata', dest='wmetadata', action='store_true', help="include metadata fields") parser.add_argument('--map-selection', dest='mapsel', action='store_true', help="generate inverse mapping dictionaries (visible value -> technical value) of selection fields in mapping.py") parser.add_argument('--with-xmlid', dest='wxmlid', action='store_true', help="assume the client file contains XML_IDs in identifier fields") - parser.add_argument('--max-descr', dest='maxdescr', default=10, help="limit long descriptions of default value and compute method to MAXDESCR lines (default: 10)") + parser.add_argument('--max-descr', dest='maxdescr', type=int, default=10, help="limit long descriptions of default value and compute method to MAXDESCR lines (default: 10)") parser.add_argument('-n', '--offline', dest='offline', action='store_true', help="don't fetch fields from model. Create a minimal skeleton") parser.add_argument('-a', '--append', dest='append', action='store_true', help="add model references to files.py, prefix.py and action scripts") parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite files and directories if existing.') parser.add_argument('-l', '--list', dest='list', action='store_true', help="List installed models in the target Odoo instance") parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='display process information') parser.add_argument('--version', dest='version', action='store_true', help='show version') - + args = parser.parse_args() # Manage params @@ -1142,7 +1138,7 @@ def show_version(): wmetadata = args.wmetadata mapsel = args.mapsel wxmlid = args.wxmlid - maxdescr = int(args.maxdescr) + maxdescr = args.maxdescr offline = args.offline append = args.append list = args.list @@ -1162,15 +1158,12 @@ def show_version(): # If no action set, prompt for scaffolding action_args = [scaffold, model] if not any(action_args): - if sys.version_info >= (3, 0, 0): - response = input("Do you want the create the folder structure in %s ? (y|N): " % base_dir) - else: - response = raw_input("Do you want the create the folder structure in %s ? (y|N): " % base_dir) + response = input(f"Do you want the create the folder structure in {base_dir} ? 
(y|N): ") scaffold = ('Y' == response.upper()) if not scaffold and not model: sys.stderr.write('You need to set an action with -s|--scaffold or -m|--model or -l|--list\n') - sys.stderr.write('Type %s -h|--help for help\n' % module_name) + sys.stderr.write(f'Type {module_name} -h|--help for help\n') sys.exit(1) script_extension = '.cmd' if platform.system() == 'Windows' else '.sh' @@ -1178,33 +1171,35 @@ def show_version(): # Do cascaded actions if scaffold: if base_dir == default_base_dir: - project_name = os.path.basename(os.path.normpath(os.getcwd())) + project_name = Path.cwd().name else: - project_name = os.path.basename(os.path.normpath(base_dir)) - - conf_dir = os.path.join(base_dir, conf_dir_name) - orig_dir = os.path.join(base_dir, orig_dir_name) - orig_raw_dir = os.path.join(base_dir, orig_raw_dir_name) - data_dir = os.path.join(base_dir, data_dir_name) - log_dir = os.path.join(base_dir, log_dir_name) + project_name = base_dir.name + + conf_dir = base_dir / conf_dir_name + orig_dir = base_dir / orig_dir_name + orig_raw_dir = orig_dir / 'binary' + data_dir = base_dir / data_dir_name + log_dir = base_dir / log_dir_name # If database is omitted, get the first part of the hostname if is_remote_host(host) and not dbname: - dbname = host.split('.')[:1][0] - sys.stdout.write('Database is set by default to %s.\n' % dbname) + dbname = host.split('.')[0] + sys.stdout.write(f'Database is set by default to {dbname}.\n') scaffold_dir() if model: model_mapped_name = model.replace('.', '_') model_class_name = model.title().replace('.', '') - model_mapping_name = '_'.join(('mapping', model_mapped_name)) + model_mapping_name = f'mapping_{model_mapped_name}' if not outfile: - outfile = '.'.join((model_mapped_name, 'py')) - outfile = os.path.join(base_dir,outfile) - config = os.path.join(base_dir, config) + outfile = Path(f'{model_mapped_name}.py') + config = base_dir / config csv_delimiter = ';' default_python_exe = '' default_path = '' - + scaffold_model() + +if __name__ == '__main__': + main() From 6d5bdc00ec46890ff82852ffbee2447af125038f Mon Sep 17 00:00:00 2001 From: bosd Date: Wed, 9 Jul 2025 19:51:52 +0200 Subject: [PATCH 02/24] [REF] Rename to odoo_dataflow_scaffold --- odoo_import_scaffold.py => odoo_dataflow_scaffold.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename odoo_import_scaffold.py => odoo_dataflow_scaffold.py (99%) diff --git a/odoo_import_scaffold.py b/odoo_dataflow_scaffold.py similarity index 99% rename from odoo_import_scaffold.py rename to odoo_dataflow_scaffold.py index 6a78d19..e7c4690 100755 --- a/odoo_import_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -227,7 +227,7 @@ def create_load_script(file: Path) -> None: f.write(" printf \"] $runtime \\n\"\n") f.write("}\n\n") f.write("load_script() {\n") - f.write(" # rm -f \$LOGDIR/load_$1_*.log\n") + f.write(" # rm -f $LOGDIR/load_$1_*.log") f.write(" ./$1.sh > $LOGDIR/load_$1_out.log 2> $LOGDIR/load_$1_err.log &\n") f.write(" msg \"$1\"\n") f.write("}\n\n") @@ -1029,7 +1029,7 @@ def scaffold_model() -> None: # Add model to clean_data.py script = dirname / 'clean_data.py' with script.open('a', encoding='utf-8') as f: - f.write(f"delete_xml_id(connection, '{model}', f'{{project_name}}_{model_mapped_name}}', demo)\n") + f.write("delete_xml_id(connection, '{}', f'{{project_name}}_{}', demo)\n".format(model, model_mapped_name)) sys.stdout.write(f'Model {model} added in {script}\n') else: sys.stdout.write(f"You should probably add this model in files.py, prefix.py, clean_data.py, transform{script_extension} and 
load{script_extension} with -a|--append\n") From b5f57a525d58bffbed17e1ec51c4e5a333931d5f Mon Sep 17 00:00:00 2001 From: bosd Date: Wed, 9 Jul 2025 21:13:08 +0200 Subject: [PATCH 03/24] Adjust to renamed functions --- odoo_dataflow_scaffold.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index e7c4690..b4f959a 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -368,7 +368,7 @@ def create_file_clean_data(file: Path) -> None: f.write("from odoo_data_flow.lib import conf_lib\n") f.write("from prefix import *\n") f.write("from files import *\n\n") - f.write("connection = conf_lib.get_server_connection(config_file)\n\n") + f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") f.write("def delete_model_data(connection, model, demo: bool = False) -> None:\n") f.write(" model_model = connection.get_model(model)\n") f.write(" record_ids = model_model.search([])\n") @@ -405,7 +405,7 @@ def create_file_install_lang(file: Path) -> None: f.write("from prefix import *\n") f.write("from files import *\n") f.write("from odoo_data_flow.lib import conf_lib\n\n") - f.write("connection = conf_lib.get_server_connection(config_file)\n\n") + f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") f.write("model_lang = connection.get_model('base.language.install')\n\n") f.write("for key in res_lang_map.keys():\n") f.write(" lang = res_lang_map[key]\n") @@ -429,7 +429,7 @@ def create_file_install_modules(file: Path) -> None: f.write("from odoo_data_flow.lib import conf_lib\n") f.write("from odoo_data_flow.lib.internal.rpc_thread import RpcThread\n") f.write("from files import config_file\n\n") - f.write("connection = conf_lib.get_server_connection(config_file)\n\n") + f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") f.write("model_module = connection.get_model('ir.module.module')\n") f.write("model_module.update_list()\n\n") f.write("# Set the modules to install\n") @@ -459,7 +459,7 @@ def create_file_uninstall_modules(file: Path) -> None: f.write("from odoo_data_flow.lib import conf_lib\n") f.write("from odoo_data_flow.lib.internal.rpc_thread import RpcThread\n") f.write("from files import config_file\n\n") - f.write("connection = conf_lib.get_server_connection(config_file)\n\n") + f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") f.write("model_module = connection.get_model('ir.module.module')\n") f.write("model_module.update_list()\n\n") f.write("# Set the modules to uninstall\n") @@ -486,7 +486,7 @@ def create_file_init_map(file: Path) -> None: f.write("from odoo_data_flow.lib import conf_lib\n") f.write("import json\n") f.write("import io\n\n") - f.write("connection = conf_lib.get_server_connection(config_file)\n\n") + f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") f.write("def build_map_product_category_id(filename: str = '') -> dict:\n") f.write(" # Build a dictionary {product_category : xml_id} of all existing product_category.\n") f.write(" model_data = connection.get_model('ir.model.data')\n") @@ -771,7 +771,7 @@ def load_fields() -> List[ModelField]: global has_tracked_fields global has_computed_fields has_tracked_fields, has_computed_fields = False, False - connection = conf_lib.get_server_connection(config) + connection = conf_lib.get_connection_from_config(config) model_fields = connection.get_model('ir.model.fields') field_ids = 
model_fields.search([('model', '=', model)]) @@ -804,7 +804,7 @@ def write_begin(file: io.TextIOWrapper) -> None: file.write("# Needed for RPC calls\n") file.write("# import odoolib\n") file.write("# from odoo_data_flow.lib import conf_lib\n") - file.write("# connection = conf_lib.get_server_connection(config_file)\n") + file.write("# connection = conf_lib.get_connection_from_config(config_file)\n") file.write(f"def preprocess_{model_class_name}(header, data):\n") file.write(" # Do nothing\n") file.write(" return header, data\n") @@ -931,7 +931,7 @@ def model_exists(model: str) -> bool: Returns: True if the model exists, False otherwise. """ - connection = conf_lib.get_server_connection(config) + connection = conf_lib.get_connection_from_config(config) model_model = connection.get_model('ir.model') res = model_model.search_count([('model', '=', model), ('transient', '=', False)]) return res != 0 @@ -944,7 +944,8 @@ def scaffold_model() -> None: global host import configparser cfg = configparser.ConfigParser(defaults={'protocol': 'xmlrpc', 'port': 8069}) - cfg.read(config) + sys.stdout.write(f"Attempting to read config from: {config.resolve()}\n") + cfg.read(str(config)) host = cfg.get('Connection', 'hostname') dbname = cfg.get('Connection', 'database') login = cfg.get('Connection', 'login') @@ -1037,7 +1038,7 @@ def scaffold_model() -> None: def list_models() -> None: """List installed models in the target Odoo instance.""" - connection = conf_lib.get_server_connection(config) + connection = conf_lib.get_connection_from_config(config) model_model = connection.get_model('ir.model') models = model_model.search_read([('transient', '=', False), ('model', '!=', '_unknown')], ['model', 'name']) @@ -1063,6 +1064,7 @@ def main() -> None: global module_name, conf_dir_name, orig_dir_name, data_dir_name, log_dir_name, selection_sep, default_base_dir global scaffold, base_dir, dbname, host, model, userid, config, outfile, required, skeleton, wstored, wo2m, wmetadata, mapsel, wxmlid, maxdescr, offline, append, list, force, verbose, version, fieldname global script_extension, project_name, model_mapped_name, model_class_name, model_mapping_name, csv_delimiter, default_python_exe, default_path + global conf_dir, orig_dir, orig_raw_dir, data_dir, log_dir module_name = Path(sys.argv[0]).name conf_dir_name = 'conf' From 876b29b7e86dd032eb5157caaa355acada6df927 Mon Sep 17 00:00:00 2001 From: bosd Date: Thu, 10 Jul 2025 00:47:11 +0200 Subject: [PATCH 04/24] feat: Add --create-export-script option and fix NameErrors --- odoo_dataflow_scaffold.py | 181 ++++++++++++++++++++++---------------- 1 file changed, 106 insertions(+), 75 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index b4f959a..d8251b9 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -944,7 +944,6 @@ def scaffold_model() -> None: global host import configparser cfg = configparser.ConfigParser(defaults={'protocol': 'xmlrpc', 'port': 8069}) - sys.stdout.write(f"Attempting to read config from: {config.resolve()}\n") cfg.read(str(config)) host = cfg.get('Connection', 'hostname') dbname = cfg.get('Connection', 'database') @@ -1059,6 +1058,70 @@ def show_version() -> None: # MAIN ############################################################################## +def create_export_script_file(model: str, model_mapped_name: str, outfile: Path, script_extension: str) -> None: + """Create a shell script to export data based on the generated mapper.""" + mapper_file_name = f'{model_mapped_name}.py' + 
mapper_file_path = Path(outfile).parent / mapper_file_name + + if not mapper_file_path.is_file(): + sys.stderr.write(f"Error: Mapper file {mapper_file_path} not found. Please generate it first using -m MODEL.\n") + sys.exit(1) + + with mapper_file_path.open('r', encoding='utf-8') as f: + mapper_content = f.read() + + # Simple parsing to extract field names from the mapping dictionary + # This assumes the mapping is defined as a dictionary named 'mapping_' + # and each field is a key in that dictionary. + field_names = [] + mapping_var_name = f'mapping_{model_mapped_name}' + + # Find the mapping dictionary definition + mapping_start = mapper_content.find(f'{mapping_var_name} = {{') + if mapping_start == -1: + sys.stderr.write(f"Error: Could not find mapping dictionary '{mapping_var_name}' in {mapper_file_path}.\n") + sys.exit(1) + + mapping_end = mapper_content.find('}\n\n', mapping_start) + if mapping_end == -1: + sys.stderr.write(f"Error: Could not find end of mapping dictionary in {mapper_file_path}.\n") + sys.exit(1) + + mapping_dict_str = mapper_content[mapping_start + len(f'{mapping_var_name} = {{'):mapping_end] + + for line in mapping_dict_str.splitlines(): + line = line.strip() + if line.startswith('#') or not line: + continue + + # Extract the key (field name) + try: + field_name = line.split(':')[0].strip().strip('\'"') + if field_name: + field_names.append(field_name) + except IndexError: + continue + + if not field_names: + sys.stderr.write(f"Warning: No field names found in mapper file {mapper_file_path}.\n") + + export_script_name = f'{model_mapped_name}_export{script_extension}' + export_script_path = Path(outfile).parent / export_script_name + + with export_script_path.open('w', encoding='utf-8') as f: + if platform.system() != 'Windows': + f.write("#!/usr/bin/env bash\n\n") + f.write(f"odoo-data-flow export \\\n") + f.write(f" --config conf/connection.conf \\\n") + f.write(f" --model \"{model}\" \\\n") + f.write(f" --file \"origin/{model_mapped_name}.csv\" \\\n") + f.write(f" --fields \"{','.join(field_names)}\"\n") + + if platform.system() != 'Windows': + export_script_path.chmod(0o755) + + sys.stdout.write(f"Export script created at {export_script_path}\n") + def main() -> None: """Main function.""" global module_name, conf_dir_name, orig_dir_name, data_dir_name, log_dir_name, selection_sep, default_base_dir @@ -1074,80 +1137,7 @@ def main() -> None: selection_sep = ': ' default_base_dir = Path('.') - - module_descr = f"""Version: {module_version} - Create the structure of an import project and model skeleton codes working - with odoo_data_flow (https://github.com/OdooDataFlow/odoo-data-flow). 
- - Functionalities: - ---------------- - - Create the project structure: - {module_name} -s -p PATH [-d DBNAME] [-t HOST] [-u USERID] [-f] [-v] - - - Skeleton a model: - {module_name} -m MODEL [-a] [--map-selection] [--with-xmlid] [-r] [-k map | -n] - [--with-one2many] [--with-metadata] [--stored] [-v] - [--max-descr MAXDESCR] [-f] [-o OUTFILE] [-c CONFIG] - - - Show available models: - {module_name} -l [-c CONFIG] - """ - - module_epilog = """ - More information on https://github.com/OdooDataFlow/odoo_dataflow_scaffold - """ - - parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=module_descr, epilog=module_epilog) - parser.add_argument('-s', '--scaffold', dest='scaffold', action='store_true', help='create the folders structure and the basic project files') - parser.add_argument('-p', '--path', dest='path', type=Path, default=default_base_dir, required=False, help='project path (default: current dir)') - parser.add_argument('-d', '--db', dest='dbname', default='', required=False, help='target database. If omitted, it is the first part of HOST') - parser.add_argument('-t', '--host', dest='host', default='localhost', required=False, help='hostname of the database (default: localhost)') - parser.add_argument('-u', '--userid', dest='userid', type=int, default=2, required=False, help='user id of RPC calls (default: 2)') - parser.add_argument('-m', '--model', dest='model', required=False, help='technical name of the model to skeleton (ex: res.partner)') - parser.add_argument('-c', '--config', dest='config', type=Path, default=Path(conf_dir_name) / 'connection.conf', required=False, help=f'configuration file (relative to --path) defining the RPC connections parameters (default: {Path(conf_dir_name) / "connection.conf"})') - parser.add_argument('-o', '--outfile', dest='outfile', type=Path, required=False, help='python script of the model skeleton code (default: model name with dots replaced by underscores)') - parser.add_argument('-k', '--skeleton', dest='skeleton', choices=['dict','map'], default='dict', required = False, help='skeleton code type. dict: generate mapping as a simple dictionary. map: create the same dictionary with map functions for each field (default: dict)') - parser.add_argument('-r', '--required', dest='required', action='store_true', help='keep only the required fields without default value (comment the optional fields') - parser.add_argument('--field-name', dest='fieldname', choices=['tech','user'], default='user', required = False, help='Field name in import file. tech=technical name, user=User name (default: user). 
Generates the mapping accordingly.') - parser.add_argument('--stored', dest='wstored', action='store_true', help="include only stored fields") - parser.add_argument('--with-o2m', dest='wo2m', action='store_true', help="include one2many fields") - parser.add_argument('--with-metadata', dest='wmetadata', action='store_true', help="include metadata fields") - parser.add_argument('--map-selection', dest='mapsel', action='store_true', help="generate inverse mapping dictionaries (visible value -> technical value) of selection fields in mapping.py") - parser.add_argument('--with-xmlid', dest='wxmlid', action='store_true', help="assume the client file contains XML_IDs in identifier fields") - parser.add_argument('--max-descr', dest='maxdescr', type=int, default=10, help="limit long descriptions of default value and compute method to MAXDESCR lines (default: 10)") - parser.add_argument('-n', '--offline', dest='offline', action='store_true', help="don't fetch fields from model. Create a minimal skeleton") - parser.add_argument('-a', '--append', dest='append', action='store_true', help="add model references to files.py, prefix.py and action scripts") - parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite files and directories if existing.') - parser.add_argument('-l', '--list', dest='list', action='store_true', help="List installed models in the target Odoo instance") - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='display process information') - parser.add_argument('--version', dest='version', action='store_true', help='show version') - - args = parser.parse_args() - - # Manage params - scaffold = args.scaffold - base_dir = args.path - dbname = args.dbname - host = args.host - model = args.model - userid = args.userid - config = args.config - outfile = args.outfile - required = args.required - skeleton = args.skeleton - wstored = args.wstored - wo2m = args.wo2m - wmetadata = args.wmetadata - mapsel = args.mapsel - wxmlid = args.wxmlid - maxdescr = args.maxdescr - offline = args.offline - append = args.append - list = args.list - force = args.force - verbose = args.verbose - version = args.version - fieldname = args.fieldname + script_extension = '.cmd' if platform.system() == 'Windows' else '.sh' # Do unit actions if version: @@ -1157,6 +1147,47 @@ def main() -> None: list_models() sys.exit(0) + if model: + model_mapped_name = model.replace('.', '_') + if not outfile: + outfile = Path(f'{model_mapped_name}.py') + + if create_export_script: + if not model: + sys.stderr.write("Error: --create-export-script requires -m MODEL to be specified.\n") + sys.exit(1) + create_export_script_file(model, model_mapped_name, outfile, script_extension) + sys.exit(0) + + if export_fields: + if not model: + sys.stderr.write("Error: --export-fields requires -m MODEL to be specified.\n") + sys.exit(1) + if not dbname: + sys.stderr.write("Error: --export-fields requires a database connection. 
Please provide -d DBNAME or ensure connection.conf is properly configured.\n") + sys.exit(1) + + sys.stdout.write("Generating import-compatible fields...\n") + fields = load_fields() + exportable_field_names = [] + for f in fields: + # Exclude fields not suitable for direct import + if f.name in ('create_uid', 'write_uid'): + continue + if f.compute: + continue + if f.related: + continue + if not f.store: + continue + if f.type == 'one2many': + continue + + exportable_field_names.append(f.name) + + sys.stdout.write(",".join(exportable_field_names) + "\n") + sys.stdout.write("Import-compatible fields generated.\n") + # If no action set, prompt for scaffolding action_args = [scaffold, model] if not any(action_args): From 3f575ab315053270f0dc0df98b8ca6bd72b87a9b Mon Sep 17 00:00:00 2001 From: bosd Date: Thu, 10 Jul 2025 01:02:20 +0200 Subject: [PATCH 05/24] fix: Handle empty datetime strings in mapper generation --- odoo_dataflow_scaffold.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index d8251b9..38e284b 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -741,7 +741,7 @@ def get_mapper_command(self) -> str: elif self.type in ('boolean'): return f"mapper.bool_val('{self.get_name()}', true_vals=true_values, false_vals=false_values)" elif self.type in ('datetime'): - return f"mapper.val('{self.get_name()}', postprocess=lambda x: datetime.strptime(x, 'CSV_DATE_FORMAT').strftime('%%Y-%%m-%%d 00:00:00'))" + return f"mapper.val('{self.get_name()}', postprocess=lambda x: datetime.strptime(x, 'CSV_DATE_FORMAT').strftime('%%Y-%%m-%%d 00:00:00') if x else '')" elif self.type in ('binary'): return f"mapper.binary('{self.get_name()}', data_raw_dir)" elif self.type in ('selection'): From 849b86c0264d481a32c9fbee9de1adb4c52f13a0 Mon Sep 17 00:00:00 2001 From: bosd Date: Thu, 10 Jul 2025 01:11:09 +0200 Subject: [PATCH 06/24] Fix: Resolve version error by assigning arguments correctly --- odoo_dataflow_scaffold.py | 78 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 38e284b..adf4ee5 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -1139,6 +1139,84 @@ def main() -> None: script_extension = '.cmd' if platform.system() == 'Windows' else '.sh' + module_descr = f"""Version: {module_version} + Create the structure of an import project and model skeleton codes working + with odoo_data_flow (https://github.com/OdooDataFlow/odoo-data-flow). 
+ + Functionalities: + ---------------- + - Create the project structure: + {module_name} -s -p PATH [-d DBNAME] [-t HOST] [-u USERID] [-f] [-v] + + - Skeleton a model: + {module_name} -m MODEL [-a] [--map-selection] [--with-xmlid] [-r] [-k map | -n] + [--with-one2many] [--with-metadata] [--stored] [-v] + [--max-descr MAXDESCR] [-f] [-o OUTFILE] [-c CONFIG] + + - Show available models: + {module_name} -l [-c CONFIG] + """ + + module_epilog = """ + More information on https://github.com/OdooDataFlow/odoo_dataflow_scaffold + """ + + parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=module_descr, epilog=module_epilog) + parser.add_argument('-s', '--scaffold', dest='scaffold', action='store_true', help='create the folders structure and the basic project files') + parser.add_argument('-p', '--path', dest='path', type=Path, default=default_base_dir, required=False, help='project path (default: current dir)') + parser.add_argument('-d', '--db', dest='dbname', default='', required=False, help='target database. If omitted, it is the first part of HOST') + parser.add_argument('-t', '--host', dest='host', default='localhost', required=False, help='hostname of the database (default: localhost)') + parser.add_argument('-u', '--userid', dest='userid', type=int, default=2, required=False, help='user id of RPC calls (default: 2)') + parser.add_argument('-m', '--model', dest='model', required=False, help='technical name of the model to skeleton (ex: res.partner)') + parser.add_argument('-c', '--config', dest='config', type=Path, default=Path(conf_dir_name) / 'connection.conf', required=False, help=f'configuration file (relative to --path) defining the RPC connections parameters (default: {Path(conf_dir_name) / "connection.conf"})') + parser.add_argument('-o', '--outfile', dest='outfile', type=Path, required=False, help='python script of the model skeleton code (default: model name with dots replaced by underscores)') + parser.add_argument('-k', '--skeleton', dest='skeleton', choices=['dict','map'], default='dict', required = False, help='skeleton code type. dict: generate mapping as a simple dictionary. map: create the same dictionary with map functions for each field (default: dict)') + parser.add_argument('-r', '--required', dest='required', action='store_true', help='keep only the required fields without default value (comment the optional fields') + parser.add_argument('--field-name', dest='fieldname', choices=['tech','user'], default='user', required = False, help='Field name in import file. tech=technical name, user=User name (default: user). 
Generates the mapping accordingly.') + parser.add_argument('--stored', dest='wstored', action='store_true', help="include only stored fields") + parser.add_argument('--with-o2m', dest='wo2m', action='store_true', help="include one2many fields") + parser.add_argument('--with-metadata', dest='wmetadata', action='store_true', help="include metadata fields") + parser.add_argument('--map-selection', dest='mapsel', action='store_true', help="generate inverse mapping dictionaries (visible value -> technical value) of selection fields in mapping.py") + parser.add_argument('--with-xmlid', dest='wxmlid', action='store_true', help="assume the client file contains XML_IDs in identifier fields") + parser.add_argument('--max-descr', dest='maxdescr', type=int, default=10, help="limit long descriptions of default value and compute method to MAXDESCR lines (default: 10)") + parser.add_argument('-n', '--offline', dest='offline', action='store_true', help="don't fetch fields from model. Create a minimal skeleton") + parser.add_argument('-a', '--append', dest='append', action='store_true', help="add model references to files.py, prefix.py and action scripts") + parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite files and directories if existing.') + parser.add_argument('--create-export-script', dest='create_export_script', action='store_true', help="Create a shell script to export data based on the generated mapper.") + parser.add_argument('--export-fields', dest='export_fields', action='store_true', help="Output a comma-separated list of field names suitable for export.") + parser.add_argument('-l', '--list', dest='list', action='store_true', help="List installed models in the target Odoo instance") + parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='display process information') + parser.add_argument('--version', dest='version', action='store_true', help='show version') + + args = parser.parse_args() + + # Manage params + scaffold = args.scaffold + base_dir = args.path + dbname = args.dbname + host = args.host + model = args.model + userid = args.userid + config = args.config + outfile = args.outfile + required = args.required + skeleton = args.skeleton + wstored = args.wstored + wo2m = args.wo2m + wmetadata = args.wmetadata + mapsel = args.mapsel + wxmlid = args.wxmlid + maxdescr = args.maxdescr + offline = args.offline + append = args.append + create_export_script = args.create_export_script + export_fields = args.export_fields + list = args.list + force = args.force + verbose = args.verbose + version = args.version + fieldname = args.fieldname + # Do unit actions if version: show_version() From 98d140928957af1d492d641b4c89bfd4fe154d84 Mon Sep 17 00:00:00 2001 From: bosd Date: Thu, 10 Jul 2025 14:23:33 +0200 Subject: [PATCH 07/24] [FIX] bool mapper syntax --- odoo_dataflow_scaffold.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index adf4ee5..31f66f8 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -739,7 +739,7 @@ def get_mapper_command(self) -> str: elif self.type in ('integer', 'float', 'monetary'): return f"mapper.num('{self.get_name()}')" elif self.type in ('boolean'): - return f"mapper.bool_val('{self.get_name()}', true_vals=true_values, false_vals=false_values)" + return f"mapper.bool_val('{self.get_name()}', true_values=true_values, false_values=false_values)" elif self.type in ('datetime'): return 
f"mapper.val('{self.get_name()}', postprocess=lambda x: datetime.strptime(x, 'CSV_DATE_FORMAT').strftime('%%Y-%%m-%%d 00:00:00') if x else '')" elif self.type in ('binary'): From e9129b369cf72779f89cf0ddc0df717a8b737b70 Mon Sep 17 00:00:00 2001 From: bosd Date: Thu, 10 Jul 2025 15:09:05 +0200 Subject: [PATCH 08/24] [REM]: verbose function is deprecated --- odoo_dataflow_scaffold.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 31f66f8..84fc506 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -29,12 +29,10 @@ def create_folder(path: Path) -> None: path: The path of the folder to create. """ if path.exists() and not force: - if verbose: - sys.stdout.write(f'Folder {path} already exists.\n') + sys.stdout.write(f'Folder {path} already exists.\n') return - if verbose: - sys.stdout.write(f'Create folder {path}\n') + sys.stdout.write(f'Create folder {path}\n') path.mkdir(parents=True, exist_ok=True) @@ -51,11 +49,9 @@ def check_file_exists(func: Callable) -> Callable: def wrapper(*args, **kwargs): file_path = Path(args[0]) if file_path.is_file() and not force: - if verbose: - sys.stdout.write(f'File {file_path} already exists.\n') + sys.stdout.write(f'File {file_path} already exists.\n') return - if verbose: - sys.stdout.write(f'Create file {file_path}\n') + sys.stdout.write(f'Create file {file_path}\n') func(*args, **kwargs) return wrapper @@ -846,7 +842,7 @@ def write_end(file: io.TextIOWrapper) -> None: if ctx_opt: ctx = f"'context': {{{', '.join(ctx_opt)}}}, " - file.write(f"processor.process({model_mapping_name}, dest_{model_mapped_name}, {{'model': '{model}', {ctx}'groupby': '', 'worker': DEFAULT_WORKER, 'batch_size': DEFAULT_BATCH_SIZE}}, 'set', verbose=False)\n\n") + file.write(f"processor.process({model_mapping_name}, dest_{model_mapped_name}, {{'model': '{model}', {ctx}'groupby': '', 'worker': DEFAULT_WORKER, 'batch_size': DEFAULT_BATCH_SIZE}}, 'set')\n\n") file.write(f"processor.write_to_file('{model_mapped_name}{script_extension}', python_exe='{default_python_exe}', path='{default_path}')\n\n") From 766bfd61a3a75648e5f230b3da0c2b854670c77d Mon Sep 17 00:00:00 2001 From: bosd Date: Mon, 14 Jul 2025 18:49:12 +0200 Subject: [PATCH 09/24] Update Readme --- README.md | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 9459b92..b3778b0 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ git clone git@github.com:OdooDataFlow/odoo_dataflow_scaffold.git # 2. How To Use It ## 2.1. Create the project structure - + To create the project structure, run the following command: ```bash @@ -75,7 +75,7 @@ The main options are: For more information on project scaffolding, see [Folders Structure and Project Files](#3-folders-structure-and-project-files). ## 2.2. Generate a model skeleton code - + From your project folder, verify the connection parameters in `conf/connection.conf` and generate the skeleton code for a model: ``` @@ -108,8 +108,8 @@ The complete set of options is described [here](#4-model-skeleton-codes). # Model my.model src_my_model = os.path.join(data_src_dir, 'my_model.csv') ``` - - * Review the python script _my_model.py_. In the generated code, you should at least: + + * Review the python script _my_model.py_. In the generated code, you should at least: * verify the column names. 
By default their name is the same as the field, which is probably not correct for all columns of the client file. When the tag 'CSV_COLUMN' appears, you also have to replace it by the right column name, * apply the right date formats, if any. You always need to replace the tag 'CSV_DATE_FORMAT' with the [directives](https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior) reflecting the field format in your client file, * comment or remove the fields you don't need. @@ -123,7 +123,7 @@ The complete set of options is described [here](#4-model-skeleton-codes). # Date of Transfer (#2832): stored, optional, readonly, datetime 'date_done': mapper.val('date_done', postprocess=lambda x: datetime.strptime(x, 'CSV_DATE_FORMAT').strftime('%Y-%m-%d 00:00:00')), ``` - + * By default the delimiter of your CSV file is set to a semicolon ';'. If you use another delimiter you need to change it at the line: ```python @@ -133,7 +133,7 @@ The complete set of options is described [here](#4-model-skeleton-codes). * `mapping.py` if you used the option **--map-selection**, * `prefix.py` if you import boolean fields or if you didn't use the option **--with-xmlid**, * the transform script `transform.sh` (`transform.cmd` on Windows) and the load script `load.sh` (`load.cmd` on Windows) to be sure that all shell commands you need will be launched. - + See the options **[--map-selection](#map-selection)** and **[-a | --append](#append)** for more details. ## 2.4. Repeat steps 2.2 and 2.3 for all models and client files you need to process. @@ -265,18 +265,18 @@ mapping_my_model = { ... } ``` -In both skeleton types, the default columns name can be chosen between the technical or the user field name in Odoo with option **--field-name tech** or **--field-name user**. +In both skeleton types, the default columns name can be chosen between the technical or the user field name in Odoo with option `--field-name tech` or `--field-name user`. The first displayed field is always the "id" field, then the required ones, then the optional ones, both sorted by name. Their map functions, if any, follow the same order. -You can use the option **-n | --offline** to get a skeleton code without field. +You can use the option **-n | --offline** to get a skeleton code without field. ```python mapping_my_model = { 'id': , } ``` -This way you keep the benefits of the basic skeleton and the automatic integration of your new script in the project. But you're not annoyed with a lot of fields you probably don't need. +This way you keep the benefits of the basic skeleton and the automatic integration of your new script in the project. But you're not annoyed with a lot of fields you probably don't need. >**Note:** You may also consider the option [**-r | --required**](#required). @@ -285,8 +285,8 @@ The offline mode is automatically set when no database is provided. When working ## 4.2. Fields Selection You can exclude the non stored fields with the option **--stored**. It is usually advisable but it avoids to import through non stored inherited fields. - - One2many and metadata fields are excluded by default but they can be included respectively with the options **--with-o2m** and **--with-metadata**. + + One2many and metadata fields are excluded by default but they can be included respectively with the options **--with-o2m** and **--with-metadata**. ## 4.3. 
Fields Information @@ -316,18 +316,18 @@ Example: 'state_id/id': mapper.m2o(PREFIX_RES_COUNTRY_STATE, 'state_id'), # Sales Order (#5305): stored, required, selection -# SELECTION: 'no-message': No Message, 'warning': Warning, 'block': Blocking Message, +# SELECTION: 'no-message': No Message, 'warning': Warning, 'block': Blocking Message, # DEFAULT: no-message 'sale_warn': mapper.val('sale_warn'), # Rml header (#1069): stored, required, text -# DEFAULT: -# +# DEFAULT: +# #
# # [truncated...] 'rml_header': mapper.val('rml_header'), -``` +``` ## 4.4. Other Skeleton Options @@ -369,21 +369,21 @@ Now with **--with-xmlid**. 'field_id/id': mapper.val('field_id'), ``` -With the option **-r | --required**, all the skeleton code related to optional fields is commented. It's handy when you have a few fields to import into a model that has a lot. You still have all the fields described, but you don't need to (un)comment or remove lots of them. +With the option **-r | --required**, all the skeleton code related to optional fields is commented. It's handy when you have a few fields to import into a model that has a lot. You still have all the fields described, but you don't need to (un)comment or remove lots of them. >**Note:** Some fields are always commented because they should not be imported. It's namely the case with related stored, computed and non stored (and non related) fields. At the end of the skeleton code stands the command line that launches a transformation to one import file (here: _dest_my_model_). ```python processor.process(mapping_my_model, dest_my_model, {'model': 'my.model', 'context': "{'some_key': True|False}", 'groupby': '', 'worker': 1, 'batch_size': 10}, 'set', verbose=False) ``` -This line is preset with some options: _groupby_, _worker_ and _batch_size_ you may want to change. By default, no context is provided, letting the import script from odoo_csv_tools (_odoo_import_thread.py_) manage a default one. Meanwhile, under certain conditions, a context is prefilled here with: +This line is preset with some options: _groupby_, _worker_ and _batch_size_ you may want to change. By default, no context is provided, letting the import script from odoo_data_flow (_odoo_import_threaded.py_) manage a default one. Meanwhile, under certain conditions, a context is prefilled here with: * **'tracking_disable': True** if a tracked field was found in the model. * **'defer_fields_computation': True** if a computed field was found in the model. * **'write_metadata': True** if the option _--with-metadata_ was used _(and even if there is no audit fields)_. By default, the generated python script is located in the current path and named as the model with dots '.' replaced by underscores '_' (my.model -> my_model.py). You can set another file name (and location) with the option **-o | --outfile**. -When a model is added to the project, the needed references can be automatically added in `files.py`, `prefixes.py`, `clean_data.py`, the transform and the load scripts with the option **-a | --append**. +When a model is added to the project, the needed references can be automatically added in `files.py`, `prefixes.py`, `clean_data.py`, the transform and the load scripts with the option **-a | --append**. * In `files.py`: the names of the client file and the import file. ```python @@ -429,7 +429,7 @@ The skeleton code is preserved if its python script already exists. You can recr # 5. Command Line Tricks It is possible to scaffold the project structure and to generate the first skeleton code in one command line. You can only do that with a database where the default credentials _admin/admin_ are valid for the userid. Also, if the database is not on your local computer, encrypted connections must be allowed on port 443. 
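When those defaults do not hold, a likely equivalent of the one-line command shown just below is to split it into a scaffold step and a skeleton step, adjusting `conf/connection.conf` in between (a sketch; `project_dir`, `my_db.odoo.com` and `my.model` are placeholders):

```bash
# 1. Scaffold only: creates the folder structure and conf/connection.conf
odoo_dataflow_scaffold.py -s -p project_dir -t my_db.odoo.com
# 2. Edit project_dir/conf/connection.conf (login, password), then generate
#    the first model skeleton from inside the project folder
cd project_dir && odoo_dataflow_scaffold.py -m my.model -a
```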
- + ```bash odoo_dataflow_scaffold.py -s -p project_dir -d my_db -t my_db.odoo.com -m my.model -a ``` @@ -439,9 +439,9 @@ odoo_dataflow_scaffold.py -s -p project_dir -d my_db -t my_db.odoo.com -m my.mod ```bash odoo_dataflow_scaffold.py -s -p project_dir -t my_db.odoo.com -m my.model -a ``` - + When no option is used, you are prompted to scaffold a project in your current directory. This path can be changed with the only option **-p | --path** So a project can be quickly scaffolded into your working directory with the command: - + ```bash odoo_dataflow_scaffold.py ``` @@ -453,12 +453,12 @@ or in another directory with the command: # 6. How-To * To quickly create a new project: ```bash -odoo_dataflow_scaffold.py -s -p my_project +odoo_dataflow_scaffold.py -s -p my_project ``` Assuming the file `conf/connection.conf` is valid and you are in the project folder. * To generate a new model: ```bash -odoo_dataflow_scaffold.py -m my.model -a +odoo_dataflow_scaffold.py -m my.model -a ``` * To generate a new model without adding its references to the python and action scripts (_say, you just need the mapping code to integrate in an existing script_): ```bash From 9049e22028dabe59358ed8f1788e21a4c957eeab Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Mon, 14 Jul 2025 19:14:38 +0200 Subject: [PATCH 10/24] :art: apply ruff formatting --- odoo_dataflow_scaffold.py | 1094 ++++++++++++++++++++++++++----------- 1 file changed, 769 insertions(+), 325 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 84fc506..456ebd4 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -1,27 +1,27 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -import sys import argparse +import io import os -import errno import platform -import odoolib -import io import socket -from odoo_data_flow.lib import conf_lib +import sys from pathlib import Path -from typing import List, Dict, Any, Callable, Union +from typing import Any, Callable, Dict, List + +from odoo_data_flow.lib import conf_lib -module_version = '1.4.2' +module_version = "1.4.2" offline = False -dbname = '' -hostname = '' +dbname = "" +hostname = "" ############################################################################## # FUNCTIONS FOR DIRECTORY STRUCTURE ############################################################################## + def create_folder(path: Path) -> None: """Create a folder only if it doesn't exist or if the flag "force" is set. @@ -29,10 +29,10 @@ def create_folder(path: Path) -> None: path: The path of the folder to create. """ if path.exists() and not force: - sys.stdout.write(f'Folder {path} already exists.\n') + sys.stdout.write(f"Folder {path} already exists.\n") return - sys.stdout.write(f'Create folder {path}\n') + sys.stdout.write(f"Create folder {path}\n") path.mkdir(parents=True, exist_ok=True) @@ -46,13 +46,15 @@ def check_file_exists(func: Callable) -> Callable: Returns: The decorated function. 
""" + def wrapper(*args, **kwargs): file_path = Path(args[0]) if file_path.is_file() and not force: - sys.stdout.write(f'File {file_path} already exists.\n') + sys.stdout.write(f"File {file_path} already exists.\n") return - sys.stdout.write(f'Create file {file_path}\n') + sys.stdout.write(f"Create file {file_path}\n") func(*args, **kwargs) + return wrapper @@ -67,7 +69,16 @@ def is_remote_host(hostname: str) -> bool: """ my_host_name = socket.gethostname() my_host_ip = socket.gethostbyname(my_host_name) - if any(hostname in x for x in [my_host_name, my_host_ip, socket.getfqdn(), 'localhost', '127.0.0.1']): + if any( + hostname in x + for x in [ + my_host_name, + my_host_ip, + socket.getfqdn(), + "localhost", + "127.0.0.1", + ] + ): return False return True @@ -81,9 +92,9 @@ def create_connection_file_local(file: Path) -> None: file: The path to the connection file. """ is_remote = is_remote_host(host) - protocol = 'jsonrpcs' if is_remote else 'jsonrpc' - port = '443' if is_remote else '8069' - with file.open('w', encoding='utf-8') as f: + protocol = "jsonrpcs" if is_remote else "jsonrpc" + port = "443" if is_remote else "8069" + with file.open("w", encoding="utf-8") as f: f.write("[Connection]\n") f.write(f"hostname = {host}\n") f.write(f"database = {dbname}\n") @@ -102,7 +113,7 @@ def create_connection_file_remote(file: Path, hostname: str) -> None: file: The path to the connection file. hostname: The hostname of the remote server. """ - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("[Connection]\n") f.write(f"hostname = {hostname}\n") f.write("database = \n") @@ -120,13 +131,13 @@ def create_cleanup_script(file: Path) -> None: Args: file: The path to the cleanup script. """ - if platform.system() == 'Windows': - with file.open('w', encoding='utf-8') as f: + if platform.system() == "Windows": + with file.open("w", encoding="utf-8") as f: f.write("@echo off\n\n") f.write(f"set DIR={data_dir_name}\n") - f.write(f"del /F /S /Q %DIR%\\*\n") + f.write("del /F /S /Q %DIR%\\*\n") else: - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("#!/usr/bin/env bash\n\n") f.write(f"DIR={data_dir_name}/\n") f.write("rm -rf --interactive=never $DIR\n") @@ -141,16 +152,18 @@ def create_transform_script(file: Path) -> None: Args: file: The path to the transform script. 
""" - if platform.system() == 'Windows': - with file.open('w', encoding='utf-8') as f: + if platform.system() == "Windows": + with file.open("w", encoding="utf-8") as f: f.write("@echo off\n\n") f.write(f"set LOGDIR={log_dir_name}\n") f.write(f"set DATADIR={data_dir_name}\n\n") f.write("call cleanup_data_dir.cmd\n\n") f.write("REM Add here all transform commands\n") - f.write("REM python my_model.py > %LOGDIR%\\transform_$1_out.log 2> %LOGDIR%\\transform_$1_err.log\n") + f.write( + "REM python my_model.py > %LOGDIR%\\transform_$1_out.log 2> %LOGDIR%\\transform_$1_err.log\n" + ) else: - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("#!/usr/bin/env bash\n\n") f.write(f"LOGDIR={log_dir_name}\n") f.write(f"DATADIR={data_dir_name}\n\n") @@ -159,20 +172,24 @@ def create_transform_script(file: Path) -> None: f.write("msg() {\n") f.write(" start=$(date +%s.%3N)\n") f.write(" PID=$!\n") - f.write(" printf \"($PID) Transform ${COLOR}$1${NC} [\"\n") + f.write(' printf "($PID) Transform ${COLOR}$1${NC} ["\n') f.write(" while kill -0 $PID 2> /dev/null; do\n") - f.write(" printf \"â–“\"\n") + f.write(' printf "â–“"\n') f.write(" sleep 1\n") f.write(" done\n") f.write(" end=$(date +%s.%3N)\n") - f.write(" runtime=$(python -c \"print(f'{int(float(end) - float(start))/60}:{int(float(end) - float(start))%60:02}')\")\n") - f.write(" printf \"] $runtime \\n\"\n") + f.write( + " runtime=$(python -c \"print(f'{int(float(end) - float(start))/60}:{int(float(end) - float(start))%60:02}')\")\n" + ) + f.write(' printf "] $runtime \\n"\n') f.write("}\n\n") f.write("load_script() {\n") f.write(" #rm -f $DATADIR/*$1*.csv*\n") f.write(" rm -f $LOGDIR/transform_$1_*.log\n") - f.write(" python $1.py > $LOGDIR/transform_$1_out.log 2> $LOGDIR/transform_$1_err.log &\n") - f.write(" msg \"$1\"\n") + f.write( + " python $1.py > $LOGDIR/transform_$1_out.log 2> $LOGDIR/transform_$1_err.log &\n" + ) + f.write(' msg "$1"\n') f.write("}\n\n") f.write("./cleanup_data_dir.sh\n\n") f.write("# Add here all transform commands\n") @@ -188,21 +205,23 @@ def create_load_script(file: Path) -> None: Args: file: The path to the load script. 
""" - if platform.system() == 'Windows': - with file.open('w', encoding='utf-8') as f: + if platform.system() == "Windows": + with file.open("w", encoding="utf-8") as f: f.write("@echo off\n\n") f.write(f"set LOGDIR={log_dir_name}\n\n") f.write("REM Add here all load commands\n") - f.write("REM my_model.cmd > %LOGDIR%\\load_$1_out.log 2> %LOGDIR%\\load_$1_err.log\n") + f.write( + "REM my_model.cmd > %LOGDIR%\\load_$1_out.log 2> %LOGDIR%\\load_$1_err.log\n" + ) else: - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("#!/usr/bin/env bash\n\n") f.write(f"LOGDIR={log_dir_name}\n\n") f.write("COLOR='\\033[1;32m'\n") f.write("NC='\\033[0m'\n\n") f.write("user_interrupt() {\n") - f.write(" echo -e \"\\n\\nKeyboard Interrupt detected.\"\n") - f.write(" echo -e \"\\nKill import tasks...\"\n") + f.write(' echo -e "\\n\\nKeyboard Interrupt detected."\n') + f.write(' echo -e "\\nKill import tasks..."\n') f.write(" # Kill the current import\n") f.write(" killall odoo-import-thread.py\n") f.write(" sleep 2\n") @@ -213,19 +232,23 @@ def create_load_script(file: Path) -> None: f.write("msg() {\n") f.write(" start=$(date +%s.%3N)\n") f.write(" PID=$!\n") - f.write(" printf \"($PID) Load ${COLOR}$1${NC} [\"\n") + f.write(' printf "($PID) Load ${COLOR}$1${NC} ["\n') f.write(" while kill -0 $PID 2> /dev/null; do\n") - f.write(" printf \"â–“\"\n") + f.write(' printf "â–“"\n') f.write(" sleep 1\n") f.write(" done\n") f.write(" end=$(date +%s.%3N)\n") - f.write(" runtime=$(python -c \"print(f'{int(float(end) - float(start))/60}:{int(float(end) - float(start))%60:02}')\")\n") - f.write(" printf \"] $runtime \\n\"\n") + f.write( + " runtime=$(python -c \"print(f'{int(float(end) - float(start))/60}:{int(float(end) - float(start))%60:02}')\")\n" + ) + f.write(' printf "] $runtime \\n"\n') f.write("}\n\n") f.write("load_script() {\n") f.write(" # rm -f $LOGDIR/load_$1_*.log") - f.write(" ./$1.sh > $LOGDIR/load_$1_out.log 2> $LOGDIR/load_$1_err.log &\n") - f.write(" msg \"$1\"\n") + f.write( + " ./$1.sh > $LOGDIR/load_$1_out.log 2> $LOGDIR/load_$1_err.log &\n" + ) + f.write(' msg "$1"\n') f.write("}\n\n") f.write("trap user_interrupt SIGINT\n") f.write("trap user_interrupt SIGTSTP\n\n") @@ -241,9 +264,11 @@ def create_file_prefix(file: Path) -> None: Args: file: The path to the prefix.py file. 
""" - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") - f.write("# This file defines xml_id prefixes and projectwise variables.\n\n") + f.write( + "# This file defines xml_id prefixes and projectwise variables.\n\n" + ) f.write("# Defines here a identifier used in the created XML_ID.\n") f.write(f"project_name = '{project_name}'\n") f.write("\n") @@ -253,10 +278,18 @@ def create_file_prefix(file: Path) -> None: f.write("# CONSTANTS\n") f.write("COMPANY_ID = 'base.main_company'\n") f.write("\n") - f.write("# Define here all values in client files considered as TRUE value.\n") - f.write("true_values = ['TRUE', 'True', 'true', 'YES', 'Yes', 'yes', 'Y', 'y', '1', '1,0', '1.0']\n") - f.write("# Define here all values in client files considered as FALSE value.\n") - f.write("false_values = ['FALSE', 'False', 'false', 'NO', 'No', 'no', 'N', 'n', '0', '0,0', '0.0']\n") + f.write( + "# Define here all values in client files considered as TRUE value.\n" + ) + f.write( + "true_values = ['TRUE', 'True', 'true', 'YES', 'Yes', 'yes', 'Y', 'y', '1', '1,0', '1.0']\n" + ) + f.write( + "# Define here all values in client files considered as FALSE value.\n" + ) + f.write( + "false_values = ['FALSE', 'False', 'false', 'NO', 'No', 'no', 'N', 'n', '0', '0,0', '0.0']\n" + ) f.write("\n") f.write("# Define the languages used in the import.\n") f.write("# These will be installed by calling install_lang.py\n") @@ -267,6 +300,7 @@ def create_file_prefix(file: Path) -> None: f.write("}\n\n") f.write("# XML ID PREFIXES\n") + @check_file_exists def create_file_mapping(file: Path) -> None: """Create the skeleton of mapping.py. @@ -274,7 +308,7 @@ def create_file_mapping(file: Path) -> None: Args: file: The path to the mapping.py file. """ - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This file defines mapping dictionaries.\n\n") f.write("# MAPPING DICTIONARIES\n") @@ -289,7 +323,7 @@ def create_file_files(file: Path) -> None: Args: file: The path to the files.py file. """ - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This file defines the names of all used files.\n\n") f.write("from pathlib import Path\n") @@ -297,7 +331,7 @@ def create_file_files(file: Path) -> None: f.write("# Folders\n") f.write(f"conf_dir = Path('{conf_dir_name}')\n") f.write(f"data_src_dir = Path('{orig_dir_name}')\n") - f.write(f"data_raw_dir = data_src_dir / 'binary'\n") + f.write("data_raw_dir = data_src_dir / 'binary'\n") f.write(f"data_dest_dir = Path('{data_dir_name}')\n") f.write("\n") f.write("# Configuration\n") @@ -306,8 +340,12 @@ def create_file_files(file: Path) -> None: f.write("# Declare here all data files\n") if not model: - f.write("# Client file: src_my_model = data_src_dir / 'my_model.csv'\n") - f.write("# Import file: dest_my_model = data_dest_dir / 'my.model.csv'\n") + f.write( + "# Client file: src_my_model = data_src_dir / 'my_model.csv'\n" + ) + f.write( + "# Import file: dest_my_model = data_dest_dir / 'my.model.csv'\n" + ) f.write("\n") @@ -319,7 +357,7 @@ def create_file_lib(file: Path) -> None: Args: file: The path to the funclib.py file. 
""" - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This file defines common functions.\n\n") f.write("from odoo_data_flow.lib import mapper\n") @@ -336,20 +374,23 @@ def create_file_lib(file: Path) -> None: f.write("def keep_letters(val: str) -> str:\n") f.write(" return ''.join(filter(str.isalpha, val))\n") f.write("\n\n") - f.write("# For more complex cases, consider using the `unidecode` library.\n") + f.write( + "# For more complex cases, consider using the `unidecode` library.\n" + ) f.write("def remove_accents(val: str) -> str:\n") f.write(" return val\n") f.write("\n\n") f.write("def keep_column_value(val, column):\n") f.write(" def keep_column_value_fun(line):\n") f.write(" if line[column] != val:\n") - f.write(" raise SkippingException(f\"Column {column} with wrong value {val}\")\n") + f.write( + ' raise SkippingException(f"Column {column} with wrong value {val}")\n' + ) f.write(" return line[column]\n") f.write(" return keep_column_value_fun\n") f.write("\n") - @check_file_exists def create_file_clean_data(file: Path) -> None: """Create the skeleton of clean_data.py. @@ -357,32 +398,48 @@ def create_file_clean_data(file: Path) -> None: Args: file: The path to the clean_data.py file. """ - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This script remove the data created by the import.\n\n") f.write("import odoolib\n") f.write("from odoo_data_flow.lib import conf_lib\n") f.write("from prefix import *\n") f.write("from files import *\n\n") - f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") - f.write("def delete_model_data(connection, model, demo: bool = False) -> None:\n") + f.write( + "connection = conf_lib.get_connection_from_config(config_file)\n\n" + ) + f.write( + "def delete_model_data(connection, model, demo: bool = False) -> None:\n" + ) f.write(" model_model = connection.get_model(model)\n") f.write(" record_ids = model_model.search([])\n") f.write(" if demo:\n") - f.write(" print(f'Will remove {len(record_ids)} records from {model}')\n") + f.write( + " print(f'Will remove {len(record_ids)} records from {model}')\n" + ) f.write(" else:\n") - f.write(" print(f'Remove {len(record_ids)} records from {model}')\n") + f.write( + " print(f'Remove {len(record_ids)} records from {model}')\n" + ) f.write(" model_model.unlink(record_ids)\n") f.write("\n\n") - f.write("def delete_xml_id(connection, model, module, demo: bool = False) -> None:\n") + f.write( + "def delete_xml_id(connection, model, module, demo: bool = False) -> None:\n" + ) f.write(" data_model = connection.get_model('ir.model.data')\n") - f.write(" data_ids = data_model.search([('module', '=', module), ('model', '=', model)])\n") + f.write( + " data_ids = data_model.search([('module', '=', module), ('model', '=', model)])\n" + ) f.write(" records = data_model.read(data_ids, ['res_id'])\n") f.write(" record_ids = [rec['res_id'] for rec in records]\n") f.write(" if demo:\n") - f.write(" print(f'Will remove {len(record_ids)} xml_id {module} from {model}')\n") + f.write( + " print(f'Will remove {len(record_ids)} xml_id {module} from {model}')\n" + ) f.write(" else:\n") - f.write(" print(f'Remove {len(record_ids)} xml_id {module} from {model}')\n") + f.write( + " print(f'Remove {len(record_ids)} xml_id {module} from {model}')\n" + ) f.write(" connection.get_model(model).unlink(record_ids)\n") f.write("\n\n") 
f.write("demo = True\n\n") @@ -395,14 +452,18 @@ def create_file_install_lang(file: Path) -> None: Args: file: The path to the install_lang.py file. """ - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("import odoolib\n") f.write("from prefix import *\n") f.write("from files import *\n") f.write("from odoo_data_flow.lib import conf_lib\n\n") - f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") - f.write("model_lang = connection.get_model('base.language.install')\n\n") + f.write( + "connection = conf_lib.get_connection_from_config(config_file)\n\n" + ) + f.write( + "model_lang = connection.get_model('base.language.install')\n\n" + ) f.write("for key in res_lang_map.keys():\n") f.write(" lang = res_lang_map[key]\n") f.write(" res = model_lang.create({'lang': lang})\n") @@ -416,27 +477,37 @@ def create_file_install_modules(file: Path) -> None: Args: file: The path to the install_modules.py file. """ - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("import sys\n") f.write("import odoolib\n") f.write("from prefix import *\n") f.write("from files import *\n") f.write("from odoo_data_flow.lib import conf_lib\n") - f.write("from odoo_data_flow.lib.internal.rpc_thread import RpcThread\n") + f.write( + "from odoo_data_flow.lib.internal.rpc_thread import RpcThread\n" + ) f.write("from files import config_file\n\n") - f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") + f.write( + "connection = conf_lib.get_connection_from_config(config_file)\n\n" + ) f.write("model_module = connection.get_model('ir.module.module')\n") f.write("model_module.update_list()\n\n") f.write("# Set the modules to install\n") f.write("module_names = []\n\n") - f.write("module_ids = model_module.search_read([['name', 'in', module_names]])\n\n") + f.write( + "module_ids = model_module.search_read([['name', 'in', module_names]])\n\n" + ) f.write("rpc_thread = RpcThread(1)\n\n") f.write("for module in module_ids:\n") f.write(" if module['state'] == 'installed':\n") - f.write(" rpc_thread.spawn_thread(model_module.button_immediate_upgrade, [module['id']])\n") + f.write( + " rpc_thread.spawn_thread(model_module.button_immediate_upgrade, [module['id']])\n" + ) f.write(" else:\n") - f.write(" rpc_thread.spawn_thread(model_module.button_immediate_install, [module['id']])\n") + f.write( + " rpc_thread.spawn_thread(model_module.button_immediate_install, [module['id']])\n" + ) @check_file_exists @@ -446,25 +517,33 @@ def create_file_uninstall_modules(file: Path) -> None: Args: file: The path to the uninstall_modules.py file. 
""" - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("import sys\n") f.write("import odoolib\n") f.write("from prefix import *\n") f.write("from files import *\n") f.write("from odoo_data_flow.lib import conf_lib\n") - f.write("from odoo_data_flow.lib.internal.rpc_thread import RpcThread\n") + f.write( + "from odoo_data_flow.lib.internal.rpc_thread import RpcThread\n" + ) f.write("from files import config_file\n\n") - f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") + f.write( + "connection = conf_lib.get_connection_from_config(config_file)\n\n" + ) f.write("model_module = connection.get_model('ir.module.module')\n") f.write("model_module.update_list()\n\n") f.write("# Set the modules to uninstall\n") f.write("module_names = []\n\n") - f.write("module_ids = model_module.search_read([['name', 'in', module_names]])\n\n") + f.write( + "module_ids = model_module.search_read([['name', 'in', module_names]])\n\n" + ) f.write("rpc_thread = RpcThread(1)\n\n") f.write("for module in module_ids:\n") f.write(" if module['state'] == 'installed':\n") - f.write(" rpc_thread.spawn_thread(model_module.button_immediate_uninstall, [module['id']])\n") + f.write( + " rpc_thread.spawn_thread(model_module.button_immediate_uninstall, [module['id']])\n" + ) @check_file_exists @@ -474,7 +553,7 @@ def create_file_init_map(file: Path) -> None: Args: file: The path to the init_map.py file. """ - with file.open('w', encoding='utf-8') as f: + with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("import odoolib\n") f.write("from prefix import *\n") @@ -482,56 +561,98 @@ def create_file_init_map(file: Path) -> None: f.write("from odoo_data_flow.lib import conf_lib\n") f.write("import json\n") f.write("import io\n\n") - f.write("connection = conf_lib.get_connection_from_config(config_file)\n\n") - f.write("def build_map_product_category_id(filename: str = '') -> dict:\n") - f.write(" # Build a dictionary {product_category : xml_id} of all existing product_category.\n") + f.write( + "connection = conf_lib.get_connection_from_config(config_file)\n\n" + ) + f.write( + "def build_map_product_category_id(filename: str = '') -> dict:\n" + ) + f.write( + " # Build a dictionary {product_category : xml_id} of all existing product_category.\n" + ) f.write(" model_data = connection.get_model('ir.model.data')\n") - f.write(" model_product_category = connection.get_model('product.category')\n") - f.write(" recs = model_product_category.search_read([], ['id', 'name'])\n\n") + f.write( + " model_product_category = connection.get_model('product.category')\n" + ) + f.write( + " recs = model_product_category.search_read([], ['id', 'name'])\n\n" + ) f.write(" res_map = {}\n") f.write(" for rec in recs:\n") - f.write(" data = model_data.search_read([('res_id', '=', rec['id']), ('model', '=', 'product.category')], ['module', 'name'])\n") + f.write( + " data = model_data.search_read([('res_id', '=', rec['id']), ('model', '=', 'product.category')], ['module', 'name'])\n" + ) f.write(" if len(data):\n") f.write(" key = rec['name'].strip()\n") - f.write(" val = '.'.join([data[0]['module'], data[0]['name'] ])\n") + f.write( + " val = '.'.join([data[0]['module'], data[0]['name'] ])\n" + ) f.write(" res_map[key] = val.strip()\n") f.write(" # else:\n") - f.write(" # print(f'Product category {rec['name']} has no XML_ID (id: {rec['id']})')\n\n") + f.write( + " # print(f'Product category {rec['name']} 
has no XML_ID (id: {rec['id']})')\n\n" + ) f.write(" if filename:\n") f.write(" with open(filename, 'w') as fp:\n") f.write(" json.dump(res_map, fp)\n\n") f.write(" return res_map\n\n") f.write("# Execute mapping\n") - f.write("# dummy = build_map_product_category_id(work_map_product_category_id)\n\n") + f.write( + "# dummy = build_map_product_category_id(work_map_product_category_id)\n\n" + ) f.write("# Add in files.py\n") - f.write("# work_map_product_category_id = data_src_dir / 'work_map_product_category_id.json'\n\n") + f.write( + "# work_map_product_category_id = data_src_dir / 'work_map_product_category_id.json'\n\n" + ) f.write("# Add in transformation script\n") f.write("# map_product_category_id = {}\n") - f.write("# with io.open(work_map_product_category_id, 'r', encoding='utf-8') as fp:\n") + f.write( + "# with io.open(work_map_product_category_id, 'r', encoding='utf-8') as fp:\n" + ) f.write("# map_product_category_id = json.load(fp)\n\n") - f.write("# Add in transformation script to map 'id' column. REVIEW COLUNM NAME and PREFIX\n") + f.write( + "# Add in transformation script to map 'id' column. REVIEW COLUNM NAME and PREFIX\n" + ) f.write("# def handle_product_category_id(line):\n") f.write("# categ_name = line['Product Category']\n") f.write("# try:\n") - f.write("# categ_xml_id = map_product_category_id[categ_name]\n") + f.write( + "# categ_xml_id = map_product_category_id[categ_name]\n" + ) f.write("# except:\n") - f.write("# categ_xml_id = mapper.m2o(PREFIX_PRODUCT_CATEGORY, 'Product Category')(line)\n") + f.write( + "# categ_xml_id = mapper.m2o(PREFIX_PRODUCT_CATEGORY, 'Product Category')(line)\n" + ) f.write("# return categ_xml_id\n\n") - f.write("##################################################################################################\n\n") - f.write("def build_account_map(company_id: int, filename: str = '') -> dict:\n") - f.write(" # Build a dictionary {account_code : xml_id} of all existing accounts of a company.\n") + f.write( + "##################################################################################################\n\n" + ) + f.write( + "def build_account_map(company_id: int, filename: str = '') -> dict:\n" + ) + f.write( + " # Build a dictionary {account_code : xml_id} of all existing accounts of a company.\n" + ) f.write(" model_data = connection.get_model('ir.model.data')\n") f.write(" model_account = connection.get_model('account.account')\n") - f.write(" recs = model_account.search_read([('company_id', '=', company_id)], ['id', 'code'])\n\n") + f.write( + " recs = model_account.search_read([('company_id', '=', company_id)], ['id', 'code'])\n\n" + ) f.write(" res_map = {}\n") f.write(" for rec in recs:\n") - f.write(" data = model_data.search_read([('res_id', '=', rec['id']), ('model', '=', 'account.account')], ['module', 'name'])\n") + f.write( + " data = model_data.search_read([('res_id', '=', rec['id']), ('model', '=', 'account.account')], ['module', 'name'])\n" + ) f.write(" if len(data):\n") f.write(" key = rec['code'].strip()\n") - f.write(" val = '.'.join([data[0]['module'], data[0]['name'] ])\n") + f.write( + " val = '.'.join([data[0]['module'], data[0]['name'] ])\n" + ) f.write(" res_map[key] = val.strip()\n") f.write(" # else:\n") - f.write(" # print(f'Account {rec['code']} has no XML_ID')\n\n") + f.write( + " # print(f'Account {rec['code']} has no XML_ID')\n\n" + ) f.write(" if filename:\n") f.write(" with open(filename, 'w') as fp:\n") f.write(" json.dump(res_map, fp)\n\n") @@ -539,20 +660,30 @@ def create_file_init_map(file: 
Path) -> None:
         f.write("# Execute mapping\n")
         f.write("# dummy = build_account_map(1, work_map_account_code_id)\n\n")
         f.write("# Add in files.py\n")
-        f.write("# work_map_account_code_id = data_src_dir / 'work_map_account_code_id.json'\n\n")
+        f.write(
+            "# work_map_account_code_id = data_src_dir / 'work_map_account_code_id.json'\n\n"
+        )
         f.write("# Add in transformation script\n")
         f.write("# map_account_code_id = {}\n")
-        f.write("# with io.open(work_map_account_code_id, 'r', encoding='utf-8') as fp:\n")
+        f.write(
+            "# with io.open(work_map_account_code_id, 'r', encoding='utf-8') as fp:\n"
+        )
         f.write("#     map_account_code_id = json.load(fp)\n\n")
-        f.write("# Add in transformation script to map 'id' column. REVIEW COLUNM NAME and PREFIX\n")
+        f.write(
+            "# Add in transformation script to map 'id' column. REVIEW COLUMN NAME and PREFIX\n"
+        )
         f.write("# def handle_account_account_id_map(line):\n")
         f.write("#     code = line['Accounts']\n")
         f.write("#     try:\n")
         f.write("#         val = map_account_code_id[code]\n")
         f.write("#     except KeyError:\n")
-        f.write("#         val = mapper.m2o(PREFIX_ACCOUNT_ACCOUNT, 'Accounts')(line)\n")
+        f.write(
+            "#         val = mapper.m2o(PREFIX_ACCOUNT_ACCOUNT, 'Accounts')(line)\n"
+        )
         f.write("#     return val\n\n")
-        f.write("##################################################################################################\n\n")
+        f.write(
+            "##################################################################################################\n\n"
+        )


def scaffold_dir() -> None:
@@ -563,30 +694,38 @@ def scaffold_dir() -> None:
     create_folder(Path(data_dir))
     create_folder(Path(log_dir))

-    create_connection_file_local(Path(conf_dir) / 'connection.conf')
-    create_connection_file_local(Path(conf_dir) / 'connection.local')
-    create_connection_file_remote(Path(conf_dir) / 'connection.staging', '.dev.odoo.com')
-    create_connection_file_remote(Path(conf_dir) / 'connection.master', '.odoo.com')
-
-    create_cleanup_script(Path(base_dir) / f'cleanup_data_dir{script_extension}')
-    create_transform_script(Path(base_dir) / f'transform{script_extension}')
-    create_load_script(Path(base_dir) / f'load{script_extension}')
-    create_file_prefix(Path(base_dir) / 'prefix.py')
-    create_file_mapping(Path(base_dir) / 'mapping.py')
-    create_file_files(Path(base_dir) / 'files.py')
-    create_file_lib(Path(base_dir) / 'funclib.py')
-    create_file_clean_data(Path(base_dir) / 'clean_data.py')
-    create_file_install_lang(Path(base_dir) / 'install_lang.py')
-    create_file_install_modules(Path(base_dir) / 'install_modules.py')
-    create_file_uninstall_modules(Path(base_dir) / 'uninstall_modules.py')
-    create_file_init_map(Path(base_dir) / 'init_map.py')
+    create_connection_file_local(Path(conf_dir) / "connection.conf")
+    create_connection_file_local(Path(conf_dir) / "connection.local")
+    create_connection_file_remote(
+        Path(conf_dir) / "connection.staging", ".dev.odoo.com"
+    )
+    create_connection_file_remote(
+        Path(conf_dir) / "connection.master", ".odoo.com"
+    )
+
+    create_cleanup_script(
+        Path(base_dir) / f"cleanup_data_dir{script_extension}"
+    )
+    create_transform_script(Path(base_dir) / f"transform{script_extension}")
+    create_load_script(Path(base_dir) / f"load{script_extension}")
+    create_file_prefix(Path(base_dir) / "prefix.py")
+    create_file_mapping(Path(base_dir) / "mapping.py")
+    create_file_files(Path(base_dir) / "files.py")
+    create_file_lib(Path(base_dir) / "funclib.py")
+    create_file_clean_data(Path(base_dir) / "clean_data.py")
+    create_file_install_lang(Path(base_dir) / "install_lang.py")
+    
create_file_install_modules(Path(base_dir) / "install_modules.py")
+    create_file_uninstall_modules(Path(base_dir) / "uninstall_modules.py")
+    create_file_init_map(Path(base_dir) / "init_map.py")
 
     sys.stdout.write(f"Project created in {Path(base_dir).resolve()}\n")
 
+
 ##############################################################################
 # FUNCTIONS FOR MODEL SKELETON CODE
 ##############################################################################
 
+
 class ModelField:
     """Manages how to get a suitable mapper function and how to document itself.
 
@@ -594,32 +733,33 @@ class ModelField:
         connection: The Odoo connection object.
         properties: A dictionary of field properties.
     """
+
    def __init__(self, connection: Any, properties: Dict[str, Any]) -> None:
        self.connection = connection
        self.properties = properties
        self.import_warn_msg: List[str] = []
-        self.id: int = properties.get('id')
-        self.name: str = properties.get('name')
-        self.type: str = properties.get('ttype')
-        self.required: bool = properties.get('required')
-        self.readonly: bool = properties.get('readonly')
-        self.string: str = properties.get('field_description')
-        self.store: bool = properties.get('store')
-        self.track_visibility: str = properties.get('track_visibility')
-        self.related: str = properties.get('related')
-        self.relation: str = properties.get('relation')
-        self.depends: str = properties.get('depends')
+        self.id: int = properties.get("id")
+        self.name: str = properties.get("name")
+        self.type: str = properties.get("ttype")
+        self.required: bool = properties.get("required")
+        self.readonly: bool = properties.get("readonly")
+        self.string: str = properties.get("field_description")
+        self.store: bool = properties.get("store")
+        self.track_visibility: str = properties.get("track_visibility")
+        self.related: str = properties.get("related")
+        self.relation: str = properties.get("relation")
+        self.depends: str = properties.get("depends")
        self.compute: List[str] = self._get_compute()
        self.selection: List[str] = self._get_selection()
        self.default_value: List[str] = self._get_default()

        # Reasons to avoid importing a field -> commented by get_info
        if self.related and self.store:
-            self.import_warn_msg.append('related stored')
+            self.import_warn_msg.append("related stored")
        if not self.store and not self.related:
-            self.import_warn_msg.append('non stored')
+            self.import_warn_msg.append("non stored")
        if len(self.compute) > 1:
-            self.import_warn_msg.append('computed')
+            self.import_warn_msg.append("computed")

    def _get_selection(self) -> List[str]:
        """Fetch the selection values of a field.
@@ -631,7 +771,11 @@ def _get_selection(self) -> List[str]:
        selection_list = []
        model_model = self.connection.get_model(model)
        try:
-            vals = model_model.fields_get([self.name]).get(self.name).get('selection')
+            vals = (
+                model_model.fields_get([self.name])
+                .get(self.name)
+                .get("selection")
+            )
            if not vals:
                return selection_list
            for sel in vals:
@@ -648,11 +792,11 @@ def _get_default(self) -> List[str]:
            or an empty list if no default value exists. 
""" model_model = self.connection.get_model(model) - val = model_model.default_get([self.name]).get(self.name, '') + val = model_model.default_get([self.name]).get(self.name, "") lines = str(val).splitlines() if maxdescr > -1 and len(lines) > max(1, maxdescr): - lines = lines[:maxdescr] + ['[truncated...]'] + lines = lines[:maxdescr] + ["[truncated...]"] return lines @@ -663,11 +807,11 @@ def _get_compute(self) -> List[str]: A list of strings (because the compute method is often a multiline value) or an empty list if no compute method exists. """ - val = self.properties.get('compute') + val = self.properties.get("compute") lines = str(val).splitlines() if maxdescr > -1 and len(lines) > maxdescr: - lines = lines[:maxdescr] + ['[truncated...]'] + lines = lines[:maxdescr] + ["[truncated...]"] return lines @@ -690,9 +834,9 @@ def get_info(self) -> str: if self.relation: info += f" -> {self.relation}" # Add XMLID summary in field info - model_data = self.connection.get_model('ir.model.data') + model_data = self.connection.get_model("ir.model.data") external_prefixes = model_data.read_group( - [('model', '=', self.relation)], ['module'], ['module'] + [("model", "=", self.relation)], ["module"], ["module"] ) if external_prefixes: info += " - with xml_id in module(s):" @@ -712,42 +856,48 @@ def get_info(self) -> str: info += f"\n # COMPUTE: depends on {self.depends}\n # {'\n # '.join(self.compute)}" if self.import_warn_msg: - info += f"\n # AVOID THIS FIELD: {', '.join(self.import_warn_msg)}" + info += ( + f"\n # AVOID THIS FIELD: {', '.join(self.import_warn_msg)}" + ) return info def get_name(self) -> str: """Returns the technical or user-friendly name of the field.""" - return self.name if fieldname == 'tech' else self.string + return self.name if fieldname == "tech" else self.string def get_mapping_name(self) -> str: """Return the field name as needed in the import file.""" - return f"{self.name}/id" if self.type in ('many2one', 'many2many') else self.name + return ( + f"{self.name}/id" + if self.type in ("many2one", "many2many") + else self.name + ) def get_mapper_command(self) -> str: """Return a suited mapper function according to the field properties and skeleton options.""" - if self.name == 'id': + if self.name == "id": if wxmlid: return f"mapper.val('{self.name}')" else: return "mapper.m2o_map(OBJECT_XMLID_PREFIX, mapper.concat('_', 'CSV_COLUMN1','CSV_COLUMN2'))" - elif self.type in ('integer', 'float', 'monetary'): + elif self.type in ("integer", "float", "monetary"): return f"mapper.num('{self.get_name()}')" - elif self.type in ('boolean'): + elif self.type in ("boolean"): return f"mapper.bool_val('{self.get_name()}', true_values=true_values, false_values=false_values)" - elif self.type in ('datetime'): + elif self.type in ("datetime"): return f"mapper.val('{self.get_name()}', postprocess=lambda x: datetime.strptime(x, 'CSV_DATE_FORMAT').strftime('%%Y-%%m-%%d 00:00:00') if x else '')" - elif self.type in ('binary'): + elif self.type in ("binary"): return f"mapper.binary('{self.get_name()}', data_raw_dir)" - elif self.type in ('selection'): + elif self.type in ("selection"): if mapsel: return f"mapper.map_val('{self.get_name()}', {model_mapped_name}_{self.name}_map)" else: return f"mapper.val('{self.get_name()}')" - elif self.type in ('many2many') and not wxmlid: + elif self.type in ("many2many") and not wxmlid: return f"mapper.m2m(PREFIX_{self.relation.replace('.', '_').upper()}, '{self.get_name()}')" - elif self.type in ('many2one', 'one2many', 'many2many') and not wxmlid: + elif 
self.type in ("many2one", "one2many", "many2many") and not wxmlid: return f"mapper.m2o(PREFIX_{self.relation.replace('.', '_').upper()}, '{self.get_name()}')" else: @@ -768,9 +918,9 @@ def load_fields() -> List[ModelField]: global has_computed_fields has_tracked_fields, has_computed_fields = False, False connection = conf_lib.get_connection_from_config(config) - model_fields = connection.get_model('ir.model.fields') + model_fields = connection.get_model("ir.model.fields") - field_ids = model_fields.search([('model', '=', model)]) + field_ids = model_fields.search([("model", "=", model)]) fields = model_fields.read(field_ids) ret = [] for field in fields: @@ -800,7 +950,9 @@ def write_begin(file: io.TextIOWrapper) -> None: file.write("# Needed for RPC calls\n") file.write("# import odoolib\n") file.write("# from odoo_data_flow.lib import conf_lib\n") - file.write("# connection = conf_lib.get_connection_from_config(config_file)\n") + file.write( + "# connection = conf_lib.get_connection_from_config(config_file)\n" + ) file.write(f"def preprocess_{model_class_name}(header, data):\n") file.write(" # Do nothing\n") file.write(" return header, data\n") @@ -818,7 +970,9 @@ def write_begin(file: io.TextIOWrapper) -> None: file.write(" # data_new.append(j)\n") file.write(" # return header, data_new\n") file.write("\n") - file.write(f"processor = Processor(src_{model_mapped_name}, delimiter='{csv_delimiter}', preprocess=preprocess_{model_class_name})\n") + file.write( + f"processor = Processor(src_{model_mapped_name}, delimiter='{csv_delimiter}', preprocess=preprocess_{model_class_name})\n" + ) file.write("\n") @@ -828,7 +982,7 @@ def write_end(file: io.TextIOWrapper) -> None: Args: file: The file object to write to. """ - ctx = '' + ctx = "" ctx_opt = [] if dbname and not offline: @@ -842,8 +996,12 @@ def write_end(file: io.TextIOWrapper) -> None: if ctx_opt: ctx = f"'context': {{{', '.join(ctx_opt)}}}, " - file.write(f"processor.process({model_mapping_name}, dest_{model_mapped_name}, {{'model': '{model}', {ctx}'groupby': '', 'worker': DEFAULT_WORKER, 'batch_size': DEFAULT_BATCH_SIZE}}, 'set')\n\n") - file.write(f"processor.write_to_file('{model_mapped_name}{script_extension}', python_exe='{default_python_exe}', path='{default_path}')\n\n") + file.write( + f"processor.process({model_mapping_name}, dest_{model_mapped_name}, {{'model': '{model}', {ctx}'groupby': '', 'worker': DEFAULT_WORKER, 'batch_size': DEFAULT_BATCH_SIZE}}, 'set')\n\n" + ) + file.write( + f"processor.write_to_file('{model_mapped_name}{script_extension}', python_exe='{default_python_exe}', path='{default_path}')\n\n" + ) def write_mapping(file: io.TextIOWrapper) -> None: @@ -859,62 +1017,94 @@ def write_mapping(file: io.TextIOWrapper) -> None: fields = load_fields() def field_filter(f: ModelField) -> bool: - if f.name == '__last_update': + if f.name == "__last_update": return False if wstored and not f.store: return False - if not wo2m and f.type == 'one2many': + if not wo2m and f.type == "one2many": return False - if not wmetadata and f.name in ('create_uid', 'write_uid', 'create_date', 'write_date', 'active'): + if not wmetadata and f.name in ( + "create_uid", + "write_uid", + "create_date", + "write_date", + "active", + ): return False return True fields = filter(field_filter, fields) - fields = sorted(fields, key=lambda f: ((f.name != 'id'), not f.is_required(), f.name)) + fields = sorted( + fields, key=lambda f: ((f.name != "id"), not f.is_required(), f.name) + ) - if skeleton == 'dict': - file.write(f'{model_mapping_name} = 
{{\n') + if skeleton == "dict": + file.write(f"{model_mapping_name} = {{\n") for f in fields: if verbose: - sys.stdout.write(f'Write field {f.name}\n') - line_start = '# ' if (required and not f.is_required() and f.name != 'id') or f.import_warn_msg else '' + sys.stdout.write(f"Write field {f.name}\n") + line_start = ( + "# " + if (required and not f.is_required() and f.name != "id") + or f.import_warn_msg + else "" + ) file.write(f" # {f.get_info()}\n") - mapper_command = f.get_mapper_command().replace('OBJECT_XMLID_PREFIX', f'PREFIX_{model_mapped_name.upper()}') - file.write(f" {line_start}'{f.get_mapping_name()}': {mapper_command},\n") - file.write('}\n\n') + mapper_command = f.get_mapper_command().replace( + "OBJECT_XMLID_PREFIX", f"PREFIX_{model_mapped_name.upper()}" + ) + file.write( + f" {line_start}'{f.get_mapping_name()}': {mapper_command},\n" + ) + file.write("}\n\n") - elif skeleton == 'map': - function_prefix = f'handle_{model_mapped_name}_' + elif skeleton == "map": + function_prefix = f"handle_{model_mapped_name}_" for f in fields: if verbose: - sys.stdout.write(f'Write map function of field {f.name}\n') - line_start = '# ' if (required and not f.is_required()) or f.import_warn_msg else '' - mapper_command = f.get_mapper_command().replace('OBJECT_XMLID_PREFIX', f'PREFIX_{model_mapped_name.upper()}') + sys.stdout.write(f"Write map function of field {f.name}\n") + line_start = ( + "# " + if (required and not f.is_required()) or f.import_warn_msg + else "" + ) + mapper_command = f.get_mapper_command().replace( + "OBJECT_XMLID_PREFIX", f"PREFIX_{model_mapped_name.upper()}" + ) file.write(f"{line_start}def {function_prefix}{f.name}(line):\n") file.write(f"{line_start} return {mapper_command}(line)\n\n") - file.write(f'{model_mapping_name} = {{\n') + file.write(f"{model_mapping_name} = {{\n") for f in fields: if verbose: - sys.stdout.write(f'Write field {f.name}\n') - line_start = '# ' if (required and not f.is_required() and f.name != 'id') or f.import_warn_msg else '' + sys.stdout.write(f"Write field {f.name}\n") + line_start = ( + "# " + if (required and not f.is_required() and f.name != "id") + or f.import_warn_msg + else "" + ) file.write(f" # {f.get_info()}\n") - file.write(f" {line_start}'{f.get_mapping_name()}': {function_prefix}{f.name},\n") - file.write('}\n\n') + file.write( + f" {line_start}'{f.get_mapping_name()}': {function_prefix}{f.name},\n" + ) + file.write("}\n\n") # Add selection dictionaries if --map-selection if mapsel: if verbose: - sys.stdout.write('Write mapping of selection fields\n') - with open(Path(base_dir) / 'mapping.py', 'a', encoding='utf-8') as pf: + sys.stdout.write("Write mapping of selection fields\n") + with open(Path(base_dir) / "mapping.py", "a", encoding="utf-8") as pf: pf.write(f"# Selection fields in model {model}\n\n") - for f in filter(lambda x: x.type == 'selection', fields): - sys.stdout.write(f'Write mapping of selection field {f.name}\n') - line_start = '# ' if (required and not f.is_required()) else '' + for f in filter(lambda x: x.type == "selection", fields): + sys.stdout.write(f"Write mapping of selection field {f.name}\n") + line_start = "# " if (required and not f.is_required()) else "" pf.write(f"{line_start}{model_mapped_name}_{f.name}_map = {{\n") for sel in f.selection: key, val = sel.split(selection_sep) - pf.write(f'{line_start} "{val.strip()}": {key.strip()},\n') + pf.write( + f'{line_start} "{val.strip()}": {key.strip()},\n' + ) pf.write(f"{line_start}}}\n\n") @@ -928,8 +1118,10 @@ def model_exists(model: str) -> bool: 
True if the model exists, False otherwise. """ connection = conf_lib.get_connection_from_config(config) - model_model = connection.get_model('ir.model') - res = model_model.search_count([('model', '=', model), ('transient', '=', False)]) + model_model = connection.get_model("ir.model") + res = model_model.search_count( + [("model", "=", model), ("transient", "=", False)] + ) return res != 0 @@ -939,14 +1131,19 @@ def scaffold_model() -> None: global dbname global host import configparser - cfg = configparser.ConfigParser(defaults={'protocol': 'xmlrpc', 'port': 8069}) + + cfg = configparser.ConfigParser( + defaults={"protocol": "xmlrpc", "port": 8069} + ) cfg.read(str(config)) - host = cfg.get('Connection', 'hostname') - dbname = cfg.get('Connection', 'database') - login = cfg.get('Connection', 'login') - uid = cfg.get('Connection', 'uid') + host = cfg.get("Connection", "hostname") + dbname = cfg.get("Connection", "database") + login = cfg.get("Connection", "login") + uid = cfg.get("Connection", "uid") - sys.stdout.write(f"Using connection file: {config} (db: {dbname}, host: {host}, login: {login}, uid: {uid})\n") + sys.stdout.write( + f"Using connection file: {config} (db: {dbname}, host: {host}, login: {login}, uid: {uid})\n" + ) if not dbname: offline = True @@ -960,89 +1157,114 @@ def scaffold_model() -> None: if outfile_path.is_file(): if force: if verbose: - sys.stdout.write(f"Output file {outfile_path} already exists and will be overwritten.\n") + sys.stdout.write( + f"Output file {outfile_path} already exists and will be overwritten.\n" + ) else: sys.stderr.write(f"The file {outfile_path} already exists.\n") do_file = False if do_file: # Write the file - with outfile_path.open('w', encoding='utf-8') as of: + with outfile_path.open("w", encoding="utf-8") as of: write_begin(of) write_mapping(of) write_end(of) if offline: - sys.stdout.write(f"Minimal skeleton code generated in {outfile_path}{' because no database is defined' if not dbname else ''}\n") + sys.stdout.write( + f"Minimal skeleton code generated in {outfile_path}{' because no database is defined' if not dbname else ''}\n" + ) else: sys.stdout.write(f"Skeleton code generated in {outfile_path}\n") else: - sys.stdout.write("Skeleton code not generated. Use option -k|--skeleton or -n|--offline or -f|--force to generate the python script.\n") + sys.stdout.write( + "Skeleton code not generated. 
Use option -k|--skeleton or -n|--offline or -f|--force to generate the python script.\n"
+        )

    dirname = outfile_path.parent
    if append:
-        #Add command to transform script
-        script = dirname / f'transform{script_extension}'
-        if platform.system() == 'Windows':
+        # Add command to transform script
+        script = dirname / f"transform{script_extension}"
+        if platform.system() == "Windows":
            line = f"echo Transform {model_mapped_name}\npython {model_mapped_name}.py > %LOGDIR%\\transform_{model_mapped_name}_out.log 2> %LOGDIR%\\transform_{model_mapped_name}_err.log\n"
        else:
            os.system(f'sed -i "$ d" {script}')
-            line = f'load_script {model_mapped_name}\nchmod +x *.sh\n'
-        with script.open('a', encoding='utf-8') as f:
+            line = f"load_script {model_mapped_name}\nchmod +x *.sh\n"
+        with script.open("a", encoding="utf-8") as f:
            f.write(line)
-        sys.stdout.write(f'Script {model_mapped_name}.py added in {script}\n')
+        sys.stdout.write(f"Script {model_mapped_name}.py added in {script}\n")

-        #Add command to load script
-        script = dirname / f'load{script_extension}'
-        if platform.system() == 'Windows':
+        # Add command to load script
+        script = dirname / f"load{script_extension}"
+        if platform.system() == "Windows":
            line = f"echo Load {model_mapped_name}\ncall {model_mapped_name}{script_extension} > %LOGDIR%\\load_{model_mapped_name}_out.log 2> %LOGDIR%\\load_{model_mapped_name}_err.log\n"
        else:
-            line = f'load_script {model_mapped_name}\n'
-        with script.open('a', encoding='utf-8') as f:
+            line = f"load_script {model_mapped_name}\n"
+        with script.open("a", encoding="utf-8") as f:
            f.write(line)
-        sys.stdout.write(f'Script {model}{script_extension} added in {script}\n')
+        sys.stdout.write(
+            f"Script {model_mapped_name}{script_extension} added in {script}\n"
+        )

        # Add model to prefix.py
        if not wxmlid:
-            script = dirname / 'prefix.py'
+            script = dirname / "prefix.py"
            line = f"PREFIX_{model_mapped_name.upper()} = f'{{project_name}}_{model_mapped_name}'\n"
-            with script.open('a', encoding='utf-8') as f:
+            with script.open("a", encoding="utf-8") as f:
                f.write(line)
-            sys.stdout.write(f'Prefix PREFIX_{model_mapped_name.upper()} added in {script}\n')
+            sys.stdout.write(
+                f"Prefix PREFIX_{model_mapped_name.upper()} added in {script}\n"
+            )
        else:
            if verbose:
-                sys.stdout.write('XML_ID prefix not added because of option --with-xmlid\n')
+                sys.stdout.write(
+                    "XML_ID prefix not added because of option --with-xmlid\n"
+                )

        # Add model to files.py
-        script = dirname / 'files.py'
-        with script.open('a', encoding='utf-8') as f:
+        script = dirname / "files.py"
+        with script.open("a", encoding="utf-8") as f:
            f.write(f"# Model {model}\n")
-            f.write(f"src_{model_mapped_name} = data_src_dir / '{model_mapped_name}.csv'\n")
-            f.write(f"dest_{model_mapped_name} = data_dest_dir / '{model}.csv'\n")
+            f.write(
+                f"src_{model_mapped_name} = data_src_dir / '{model_mapped_name}.csv'\n"
+            )
+            f.write(
+                f"dest_{model_mapped_name} = data_dest_dir / '{model}.csv'\n"
+            )
-        sys.stdout.write(f'{model} files added in {script}\n')
+        sys.stdout.write(f"{model} files added in {script}\n")

        # Add model to clean_data.py
-        script = dirname / 'clean_data.py'
-        with script.open('a', encoding='utf-8') as f:
-            f.write("delete_xml_id(connection, '{}', f'{{project_name}}_{}', demo)\n".format(model, model_mapped_name))
-        sys.stdout.write(f'Model {model} added in {script}\n')
+        script = dirname / "clean_data.py"
+        with script.open("a", encoding="utf-8") as f:
+            f.write(
+                "delete_xml_id(connection, '{}', f'{{project_name}}_{}', demo)\n".format(
+                    model, model_mapped_name
+                )
+            
)
+        sys.stdout.write(f"Model {model} added in {script}\n")

    else:
-        sys.stdout.write(f"You should probably add this model in files.py, prefix.py, clean_data.py, transform{script_extension} and load{script_extension} with -a|--append\n")
+        sys.stdout.write(
+            f"You should probably add this model in files.py, prefix.py, clean_data.py, transform{script_extension} and load{script_extension} with -a|--append\n"
+        )


def list_models() -> None:
    """List installed models in the target Odoo instance."""
    connection = conf_lib.get_connection_from_config(config)
-    model_model = connection.get_model('ir.model')
+    model_model = connection.get_model("ir.model")

-    models = model_model.search_read([('transient', '=', False), ('model', '!=', '_unknown')], ['model', 'name'])
+    models = model_model.search_read(
+        [("transient", "=", False), ("model", "!=", "_unknown")],
+        ["model", "name"],
+    )
    if not models:
-        sys.stdout.write('No model found !')
+        sys.stdout.write("No model found!\n")
        return

-    for m in sorted(models, key=lambda f: f['model']):
+    for m in sorted(models, key=lambda f: f["model"]):
        sys.stdout.write(f"{m['model']} ({m['name']})\n")


@@ -1050,93 +1272,147 @@ def show_version() -> None:
    """Show the version of the script."""
    sys.stdout.write(f"{module_name} {module_version}\n")

+
##############################################################################
# MAIN
##############################################################################

+
-def create_export_script_file(model: str, model_mapped_name: str, outfile: Path, script_extension: str) -> None:
+def create_export_script_file(
+    model: str, model_mapped_name: str, outfile: Path, script_extension: str
+) -> None:
    """Create a shell script to export data based on the generated mapper."""
-    mapper_file_name = f'{model_mapped_name}.py'
+    mapper_file_name = f"{model_mapped_name}.py"
    mapper_file_path = Path(outfile).parent / mapper_file_name

    if not mapper_file_path.is_file():
-        sys.stderr.write(f"Error: Mapper file {mapper_file_path} not found. Please generate it first using -m MODEL.\n")
+        sys.stderr.write(
+            f"Error: Mapper file {mapper_file_path} not found. Please generate it first using -m MODEL.\n"
+        )
        sys.exit(1)

-    with mapper_file_path.open('r', encoding='utf-8') as f:
+    with mapper_file_path.open("r", encoding="utf-8") as f:
        mapper_content = f.read()

    # Simple parsing to extract field names from the mapping dictionary
    # This assumes the mapping is defined as a dictionary named 'mapping_'
    # and each field is a key in that dictionary. 
field_names = [] - mapping_var_name = f'mapping_{model_mapped_name}' - + mapping_var_name = f"mapping_{model_mapped_name}" + # Find the mapping dictionary definition - mapping_start = mapper_content.find(f'{mapping_var_name} = {{') + mapping_start = mapper_content.find(f"{mapping_var_name} = {{") if mapping_start == -1: - sys.stderr.write(f"Error: Could not find mapping dictionary '{mapping_var_name}' in {mapper_file_path}.\n") + sys.stderr.write( + f"Error: Could not find mapping dictionary '{mapping_var_name}' in {mapper_file_path}.\n" + ) sys.exit(1) - - mapping_end = mapper_content.find('}\n\n', mapping_start) + + mapping_end = mapper_content.find("}\n\n", mapping_start) if mapping_end == -1: - sys.stderr.write(f"Error: Could not find end of mapping dictionary in {mapper_file_path}.\n") + sys.stderr.write( + f"Error: Could not find end of mapping dictionary in {mapper_file_path}.\n" + ) sys.exit(1) - mapping_dict_str = mapper_content[mapping_start + len(f'{mapping_var_name} = {{'):mapping_end] - + mapping_dict_str = mapper_content[ + mapping_start + len(f"{mapping_var_name} = {{") : mapping_end + ] + for line in mapping_dict_str.splitlines(): line = line.strip() - if line.startswith('#') or not line: + if line.startswith("#") or not line: continue - + # Extract the key (field name) try: - field_name = line.split(':')[0].strip().strip('\'"') + field_name = line.split(":")[0].strip().strip("'\"") if field_name: field_names.append(field_name) except IndexError: continue if not field_names: - sys.stderr.write(f"Warning: No field names found in mapper file {mapper_file_path}.\n") - - export_script_name = f'{model_mapped_name}_export{script_extension}' + sys.stderr.write( + f"Warning: No field names found in mapper file {mapper_file_path}.\n" + ) + + export_script_name = f"{model_mapped_name}_export{script_extension}" export_script_path = Path(outfile).parent / export_script_name - with export_script_path.open('w', encoding='utf-8') as f: - if platform.system() != 'Windows': + with export_script_path.open("w", encoding="utf-8") as f: + if platform.system() != "Windows": f.write("#!/usr/bin/env bash\n\n") - f.write(f"odoo-data-flow export \\\n") - f.write(f" --config conf/connection.conf \\\n") - f.write(f" --model \"{model}\" \\\n") - f.write(f" --file \"origin/{model_mapped_name}.csv\" \\\n") - f.write(f" --fields \"{','.join(field_names)}\"\n") - - if platform.system() != 'Windows': + f.write("odoo-data-flow export \\\n") + f.write(" --config conf/connection.conf \\\n") + f.write(f' --model "{model}" \\\n') + f.write(f' --file "origin/{model_mapped_name}.csv" \\\n') + f.write(f' --fields "{",".join(field_names)}"\n') + + + if platform.system() != "Windows": export_script_path.chmod(0o755) sys.stdout.write(f"Export script created at {export_script_path}\n") + def main() -> None: """Main function.""" - global module_name, conf_dir_name, orig_dir_name, data_dir_name, log_dir_name, selection_sep, default_base_dir - global scaffold, base_dir, dbname, host, model, userid, config, outfile, required, skeleton, wstored, wo2m, wmetadata, mapsel, wxmlid, maxdescr, offline, append, list, force, verbose, version, fieldname - global script_extension, project_name, model_mapped_name, model_class_name, model_mapping_name, csv_delimiter, default_python_exe, default_path + global \ + module_name, \ + conf_dir_name, \ + orig_dir_name, \ + data_dir_name, \ + log_dir_name, \ + selection_sep, \ + default_base_dir + global \ + scaffold, \ + base_dir, \ + dbname, \ + host, \ + model, \ + userid, \ + config, \ + 
outfile, \ + required, \ + skeleton, \ + wstored, \ + wo2m, \ + wmetadata, \ + mapsel, \ + wxmlid, \ + maxdescr, \ + offline, \ + append, \ + list, \ + force, \ + verbose, \ + version, \ + fieldname + global \ + script_extension, \ + project_name, \ + model_mapped_name, \ + model_class_name, \ + model_mapping_name, \ + csv_delimiter, \ + default_python_exe, \ + default_path global conf_dir, orig_dir, orig_raw_dir, data_dir, log_dir module_name = Path(sys.argv[0]).name - conf_dir_name = 'conf' - orig_dir_name = 'origin' - data_dir_name = 'data' - log_dir_name = 'log' - selection_sep = ': ' - default_base_dir = Path('.') + conf_dir_name = "conf" + orig_dir_name = "origin" + data_dir_name = "data" + log_dir_name = "log" + selection_sep = ": " + default_base_dir = Path(".") - script_extension = '.cmd' if platform.system() == 'Windows' else '.sh' + script_extension = ".cmd" if platform.system() == "Windows" else ".sh" module_descr = f"""Version: {module_version} - Create the structure of an import project and model skeleton codes working + Create the structure of an import project and model skeleton codes working with odoo_data_flow (https://github.com/OdooDataFlow/odoo-data-flow). Functionalities: @@ -1157,32 +1433,187 @@ def main() -> None: More information on https://github.com/OdooDataFlow/odoo_dataflow_scaffold """ - parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=module_descr, epilog=module_epilog) - parser.add_argument('-s', '--scaffold', dest='scaffold', action='store_true', help='create the folders structure and the basic project files') - parser.add_argument('-p', '--path', dest='path', type=Path, default=default_base_dir, required=False, help='project path (default: current dir)') - parser.add_argument('-d', '--db', dest='dbname', default='', required=False, help='target database. If omitted, it is the first part of HOST') - parser.add_argument('-t', '--host', dest='host', default='localhost', required=False, help='hostname of the database (default: localhost)') - parser.add_argument('-u', '--userid', dest='userid', type=int, default=2, required=False, help='user id of RPC calls (default: 2)') - parser.add_argument('-m', '--model', dest='model', required=False, help='technical name of the model to skeleton (ex: res.partner)') - parser.add_argument('-c', '--config', dest='config', type=Path, default=Path(conf_dir_name) / 'connection.conf', required=False, help=f'configuration file (relative to --path) defining the RPC connections parameters (default: {Path(conf_dir_name) / "connection.conf"})') - parser.add_argument('-o', '--outfile', dest='outfile', type=Path, required=False, help='python script of the model skeleton code (default: model name with dots replaced by underscores)') - parser.add_argument('-k', '--skeleton', dest='skeleton', choices=['dict','map'], default='dict', required = False, help='skeleton code type. dict: generate mapping as a simple dictionary. map: create the same dictionary with map functions for each field (default: dict)') - parser.add_argument('-r', '--required', dest='required', action='store_true', help='keep only the required fields without default value (comment the optional fields') - parser.add_argument('--field-name', dest='fieldname', choices=['tech','user'], default='user', required = False, help='Field name in import file. tech=technical name, user=User name (default: user). 
Generates the mapping accordingly.') - parser.add_argument('--stored', dest='wstored', action='store_true', help="include only stored fields") - parser.add_argument('--with-o2m', dest='wo2m', action='store_true', help="include one2many fields") - parser.add_argument('--with-metadata', dest='wmetadata', action='store_true', help="include metadata fields") - parser.add_argument('--map-selection', dest='mapsel', action='store_true', help="generate inverse mapping dictionaries (visible value -> technical value) of selection fields in mapping.py") - parser.add_argument('--with-xmlid', dest='wxmlid', action='store_true', help="assume the client file contains XML_IDs in identifier fields") - parser.add_argument('--max-descr', dest='maxdescr', type=int, default=10, help="limit long descriptions of default value and compute method to MAXDESCR lines (default: 10)") - parser.add_argument('-n', '--offline', dest='offline', action='store_true', help="don't fetch fields from model. Create a minimal skeleton") - parser.add_argument('-a', '--append', dest='append', action='store_true', help="add model references to files.py, prefix.py and action scripts") - parser.add_argument('-f', '--force', dest='force', action='store_true', help='overwrite files and directories if existing.') - parser.add_argument('--create-export-script', dest='create_export_script', action='store_true', help="Create a shell script to export data based on the generated mapper.") - parser.add_argument('--export-fields', dest='export_fields', action='store_true', help="Output a comma-separated list of field names suitable for export.") - parser.add_argument('-l', '--list', dest='list', action='store_true', help="List installed models in the target Odoo instance") - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='display process information') - parser.add_argument('--version', dest='version', action='store_true', help='show version') + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=module_descr, + epilog=module_epilog, + ) + parser.add_argument( + "-s", + "--scaffold", + dest="scaffold", + action="store_true", + help="create the folders structure and the basic project files", + ) + parser.add_argument( + "-p", + "--path", + dest="path", + type=Path, + default=default_base_dir, + required=False, + help="project path (default: current dir)", + ) + parser.add_argument( + "-d", + "--db", + dest="dbname", + default="", + required=False, + help="target database. 
If omitted, it is the first part of HOST",
    )
    parser.add_argument(
        "-t",
        "--host",
        dest="host",
        default="localhost",
        required=False,
        help="hostname of the database (default: localhost)",
    )
    parser.add_argument(
        "-u",
        "--userid",
        dest="userid",
        type=int,
        default=2,
        required=False,
        help="user id of RPC calls (default: 2)",
    )
    parser.add_argument(
        "-m",
        "--model",
        dest="model",
        required=False,
        help="technical name of the model to skeleton (ex: res.partner)",
    )
    parser.add_argument(
        "-c",
        "--config",
        dest="config",
        type=Path,
        default=Path(conf_dir_name) / "connection.conf",
        required=False,
        help=f"configuration file (relative to --path) defining the RPC connections parameters (default: {Path(conf_dir_name) / 'connection.conf'})",
    )
    parser.add_argument(
        "-o",
        "--outfile",
        dest="outfile",
        type=Path,
        required=False,
        help="python script of the model skeleton code (default: model name with dots replaced by underscores)",
    )
    parser.add_argument(
        "-k",
        "--skeleton",
        dest="skeleton",
        choices=["dict", "map"],
        default="dict",
        required=False,
        help="skeleton code type. dict: generate mapping as a simple dictionary. map: create the same dictionary with map functions for each field (default: dict)",
    )
    parser.add_argument(
        "-r",
        "--required",
        dest="required",
        action="store_true",
        help="keep only the required fields without default value (comment the optional fields)",
    )
    parser.add_argument(
        "--field-name",
        dest="fieldname",
        choices=["tech", "user"],
        default="user",
        required=False,
        help="Field name in import file. tech=technical name, user=User name (default: user). Generates the mapping accordingly.",
    )
    parser.add_argument(
        "--stored",
        dest="wstored",
        action="store_true",
        help="include only stored fields",
    )
    parser.add_argument(
        "--with-o2m",
        dest="wo2m",
        action="store_true",
        help="include one2many fields",
    )
    parser.add_argument(
        "--with-metadata",
        dest="wmetadata",
        action="store_true",
        help="include metadata fields",
    )
    parser.add_argument(
        "--map-selection",
        dest="mapsel",
        action="store_true",
        help="generate inverse mapping dictionaries (visible value -> technical value) of selection fields in mapping.py",
    )
    parser.add_argument(
        "--with-xmlid",
        dest="wxmlid",
        action="store_true",
        help="assume the client file contains XML_IDs in identifier fields",
    )
    parser.add_argument(
        "--max-descr",
        dest="maxdescr",
        type=int,
        default=10,
        help="limit long descriptions of default value and compute method to MAXDESCR lines (default: 10)",
    )
    parser.add_argument(
        "-n",
        "--offline",
        dest="offline",
        action="store_true",
        help="don't fetch fields from model. 
Create a minimal skeleton", + ) + parser.add_argument( + "-a", + "--append", + dest="append", + action="store_true", + help="add model references to files.py, prefix.py and action scripts", + ) + parser.add_argument( + "-f", + "--force", + dest="force", + action="store_true", + help="overwrite files and directories if existing.", + ) + parser.add_argument( + "--create-export-script", + dest="create_export_script", + action="store_true", + help="Create a shell script to export data based on the generated mapper.", + ) + parser.add_argument( + "--export-fields", + dest="export_fields", + action="store_true", + help="Output a comma-separated list of field names suitable for export.", + ) + parser.add_argument( + "-l", + "--list", + dest="list", + action="store_true", + help="List installed models in the target Odoo instance", + ) + parser.add_argument( + "-v", + "--verbose", + dest="verbose", + action="store_true", + help="display process information", + ) + parser.add_argument( + "--version", dest="version", action="store_true", help="show version" + ) args = parser.parse_args() @@ -1222,23 +1653,31 @@ def main() -> None: sys.exit(0) if model: - model_mapped_name = model.replace('.', '_') + model_mapped_name = model.replace(".", "_") if not outfile: - outfile = Path(f'{model_mapped_name}.py') + outfile = Path(f"{model_mapped_name}.py") if create_export_script: if not model: - sys.stderr.write("Error: --create-export-script requires -m MODEL to be specified.\n") + sys.stderr.write( + "Error: --create-export-script requires -m MODEL to be specified.\n" + ) sys.exit(1) - create_export_script_file(model, model_mapped_name, outfile, script_extension) + create_export_script_file( + model, model_mapped_name, outfile, script_extension + ) sys.exit(0) if export_fields: if not model: - sys.stderr.write("Error: --export-fields requires -m MODEL to be specified.\n") + sys.stderr.write( + "Error: --export-fields requires -m MODEL to be specified.\n" + ) sys.exit(1) if not dbname: - sys.stderr.write("Error: --export-fields requires a database connection. Please provide -d DBNAME or ensure connection.conf is properly configured.\n") + sys.stderr.write( + "Error: --export-fields requires a database connection. Please provide -d DBNAME or ensure connection.conf is properly configured.\n" + ) sys.exit(1) sys.stdout.write("Generating import-compatible fields...\n") @@ -1246,7 +1685,7 @@ def main() -> None: exportable_field_names = [] for f in fields: # Exclude fields not suitable for direct import - if f.name in ('create_uid', 'write_uid'): + if f.name in ("create_uid", "write_uid"): continue if f.compute: continue @@ -1254,26 +1693,30 @@ def main() -> None: continue if not f.store: continue - if f.type == 'one2many': + if f.type == "one2many": continue - + exportable_field_names.append(f.name) - + sys.stdout.write(",".join(exportable_field_names) + "\n") sys.stdout.write("Import-compatible fields generated.\n") # If no action set, prompt for scaffolding action_args = [scaffold, model] if not any(action_args): - response = input(f"Do you want the create the folder structure in {base_dir} ? (y|N): ") - scaffold = ('Y' == response.upper()) + response = input( + f"Do you want the create the folder structure in {base_dir} ? 
(y|N): " + ) + scaffold = "Y" == response.upper() if not scaffold and not model: - sys.stderr.write('You need to set an action with -s|--scaffold or -m|--model or -l|--list\n') - sys.stderr.write(f'Type {module_name} -h|--help for help\n') + sys.stderr.write( + "You need to set an action with -s|--scaffold or -m|--model or -l|--list\n" + ) + sys.stderr.write(f"Type {module_name} -h|--help for help\n") sys.exit(1) - script_extension = '.cmd' if platform.system() == 'Windows' else '.sh' + script_extension = ".cmd" if platform.system() == "Windows" else ".sh" # Do cascaded actions if scaffold: @@ -1284,29 +1727,30 @@ def main() -> None: conf_dir = base_dir / conf_dir_name orig_dir = base_dir / orig_dir_name - orig_raw_dir = orig_dir / 'binary' + orig_raw_dir = orig_dir / "binary" data_dir = base_dir / data_dir_name log_dir = base_dir / log_dir_name # If database is omitted, get the first part of the hostname if is_remote_host(host) and not dbname: - dbname = host.split('.')[0] - sys.stdout.write(f'Database is set by default to {dbname}.\n') + dbname = host.split(".")[0] + sys.stdout.write(f"Database is set by default to {dbname}.\n") scaffold_dir() if model: - model_mapped_name = model.replace('.', '_') - model_class_name = model.title().replace('.', '') - model_mapping_name = f'mapping_{model_mapped_name}' + model_mapped_name = model.replace(".", "_") + model_class_name = model.title().replace(".", "") + model_mapping_name = f"mapping_{model_mapped_name}" if not outfile: - outfile = Path(f'{model_mapped_name}.py') + outfile = Path(f"{model_mapped_name}.py") config = base_dir / config - csv_delimiter = ';' - default_python_exe = '' - default_path = '' + csv_delimiter = ";" + default_python_exe = "" + default_path = "" scaffold_model() -if __name__ == '__main__': + +if __name__ == "__main__": main() From e001c3841714a86796ebb3ce085918a6cdc8a84b Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Mon, 14 Jul 2025 19:17:45 +0200 Subject: [PATCH 11/24] :sparkles: By default export technical names Main use case of this library is import/export. Makes sense to have an import compatible export by default. --- odoo_dataflow_scaffold.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 456ebd4..24e7403 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -1347,8 +1347,8 @@ def create_export_script_file( f.write(" --config conf/connection.conf \\\n") f.write(f' --model "{model}" \\\n') f.write(f' --file "origin/{model_mapped_name}.csv" \\\n') - f.write(f' --fields "{",".join(field_names)}"\n') - + f.write(f' --fields "{",".join(field_names)}" \\\n') + f.write(" --technical-names\n") if platform.system() != "Windows": export_script_path.chmod(0o755) From a30d733d3cf663c9fcfdd54b6d8d41619beaa2db Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Mon, 14 Jul 2025 21:22:47 +0200 Subject: [PATCH 12/24] fix(scaffold): Read config file before checking for dbname The script was previously checking for the database name from the command-line arguments before it had parsed the config file. This caused an incorrect "database not found" error when the database was correctly specified in the config file but not as a `-d` argument. This commit fixes the bug by reordering the logic in the `main` function to ensure the config file is read *before* any operations that depend on its values are executed. 
The script now correctly uses the database name from the config file as a fallback if the `-d` flag is not provided. --- odoo_dataflow_scaffold.py | 57 ++++++++++++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 7 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 24e7403..2b92f48 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- import argparse +import configparser import io import os import platform @@ -16,6 +17,37 @@ offline = False dbname = "" hostname = "" +force = False +data_dir_name = "data" +log_dir_name = "log" +conf_dir_name = "conf" +orig_dir_name = "origin" +project_name = "" +model_mapped_name = "" +selection_sep = ": " +script_extension = ".cmd" if platform.system() == "Windows" else ".sh" +default_python_exe = "" +default_path = "" +csv_delimiter = ";" +base_dir = Path(".") +has_tracked_fields = False +has_computed_fields = False +model = "" +userid = 2 +config = "" +outfile = "" +required = False +skeleton = "" +wstored = False +wo2m = False +wmetadata = False +mapsel = False +wxmlid = False +maxdescr = 10 +append = False +verbose = False +fieldname = "" + ############################################################################## # FUNCTIONS FOR DIRECTORY STRUCTURE @@ -1422,8 +1454,8 @@ def main() -> None: - Skeleton a model: {module_name} -m MODEL [-a] [--map-selection] [--with-xmlid] [-r] [-k map | -n] - [--with-one2many] [--with-metadata] [--stored] [-v] - [--max-descr MAXDESCR] [-f] [-o OUTFILE] [-c CONFIG] + [--with-one2many] [--with-metadata] [--stored] [-v] + [--max-descr MAXDESCR] [-f] [-o OUTFILE] [-c CONFIG] - Show available models: {module_name} -l [-c CONFIG] @@ -1636,19 +1668,29 @@ def main() -> None: maxdescr = args.maxdescr offline = args.offline append = args.append - create_export_script = args.create_export_script - export_fields = args.export_fields - list = args.list + list_models_flag = args.list force = args.force verbose = args.verbose version = args.version fieldname = args.fieldname + create_export_script = args.create_export_script + export_fields = args.export_fields - # Do unit actions + # --- Start of Corrected Logic --- + # 1. Parse config file early to get database name if not provided by CLI + if not dbname: + config_path = base_dir / config + if config_path.is_file(): + cfg = configparser.ConfigParser() + cfg.read(str(config_path)) + if cfg.has_section("Connection"): + dbname = cfg.get("Connection", "database") + + # 2. Now perform actions that require the database name if version: show_version() sys.exit(0) - if list: + if list_models_flag: list_models() sys.exit(0) @@ -1700,6 +1742,7 @@ def main() -> None: sys.stdout.write(",".join(exportable_field_names) + "\n") sys.stdout.write("Import-compatible fields generated.\n") + sys.exit(0) # If no action set, prompt for scaffolding action_args = [scaffold, model] From cddf4b6063b8f48426c7e684cbf41836ac8c595b Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Tue, 15 Jul 2025 10:43:43 +0200 Subject: [PATCH 13/24] :art::sparkles:[FEAT](scaffold): Add migration-aware config file options Adds support for separate source and destination database configurations to the scaffolding tool, providing a more flexible workflow for data migrations between different Odoo instances. 
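Concretely, a project scaffolded with both flags gets a `files.py` that
references the two connection files, roughly like this sketch (the file
names shown are the defaults written by the scaffolder):

```python
# Sketch of the generated files.py (default connection file names shown).
from pathlib import Path

conf_dir = Path('conf')
source_config_file = conf_dir / 'connection.conf'
destination_config_file = conf_dir / 'dest_connection.conf'
```
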
Previously, the scaffold assumed a single `connection.conf` for all operations, making it difficult to read metadata from one database (e.g., Odoo 12) and generate an import script for another (e.g., Odoo 18). This commit introduces two new command-line arguments: - `--source-config`: Specifies the connection file for the source database. - `--destination-config`: Specifies the connection file for the destination database. The script now uses these arguments to: - Generate a `files.py` that references both config files. - Ensure that operations requiring database inspection (like `--export-fields`) use the source configuration. - The generated transformation scripts will be updated in a subsequent commit to use the destination config for the final import step. The old `--config` flag is retained as a fallback for single-database workflows, ensuring backward compatibility. --- odoo_dataflow_scaffold.py | 167 +++++++++++++++++++++++--------------- 1 file changed, 101 insertions(+), 66 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 2b92f48..32368db 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -13,7 +13,7 @@ from odoo_data_flow.lib import conf_lib -module_version = "1.4.2" +module_version = "1.4.3" offline = False dbname = "" hostname = "" @@ -47,6 +47,13 @@ append = False verbose = False fieldname = "" +conf_dir = "" +orig_dir = "" +orig_raw_dir = "" +data_dir = "" +log_dir = "" +model_class_name = "" +model_mapping_name = "" ############################################################################## @@ -116,12 +123,13 @@ def is_remote_host(hostname: str) -> bool: @check_file_exists -def create_connection_file_local(file: Path) -> None: - """Enforce encrypted connection on remote hosts. - Leave unencrypted for local databases. +def create_connection_file( + file: Path, host: str, dbname: str, userid: int +) -> None: + """Create a connection file. - Args: - file: The path to the connection file. + Enforce encrypted connection on remote hosts. + Leave unencrypted for local databases. """ is_remote = is_remote_host(host) protocol = "jsonrpcs" if is_remote else "jsonrpc" @@ -137,25 +145,6 @@ def create_connection_file_local(file: Path) -> None: f.write(f"uid = {userid}\n") -@check_file_exists -def create_connection_file_remote(file: Path, hostname: str) -> None: - """Just a preset for encrypted connection. - - Args: - file: The path to the connection file. - hostname: The hostname of the remote server. - """ - with file.open("w", encoding="utf-8") as f: - f.write("[Connection]\n") - f.write(f"hostname = {hostname}\n") - f.write("database = \n") - f.write("login = \n") - f.write("password = \n") - f.write("protocol = jsonrpcs\n") - f.write("port = 443\n") - f.write(f"uid = {userid}\n") - - @check_file_exists def create_cleanup_script(file: Path) -> None: """Create the shell script to empty the folder "data". @@ -225,6 +214,7 @@ def create_transform_script(file: Path) -> None: f.write("}\n\n") f.write("./cleanup_data_dir.sh\n\n") f.write("# Add here all transform commands\n") + f.write("# python my_model.py\n") f.write("# load_script python_script (without extension)\n") f.write("chmod +x *.sh\n") file.chmod(0o755) @@ -337,6 +327,9 @@ def create_file_prefix(file: Path) -> None: def create_file_mapping(file: Path) -> None: """Create the skeleton of mapping.py. + Use odoo_import_scaffold with option --map-selection to + automatically build dictionaries of selection fields. + Args: file: The path to the mapping.py file. 
""" @@ -349,26 +342,20 @@ def create_file_mapping(file: Path) -> None: @check_file_exists -def create_file_files(file: Path) -> None: - """Create the skeleton of files.py. - - Args: - file: The path to the files.py file. - """ +def create_file_files(file: Path, source_config: str, dest_config: str) -> None: + """Create the skeleton of files.py.""" with file.open("w", encoding="utf-8") as f: f.write("# -*- coding: utf-8 -*-\n\n") f.write("# This file defines the names of all used files.\n\n") - f.write("from pathlib import Path\n") - f.write("\n") + f.write("from pathlib import Path\n\n") f.write("# Folders\n") f.write(f"conf_dir = Path('{conf_dir_name}')\n") f.write(f"data_src_dir = Path('{orig_dir_name}')\n") f.write("data_raw_dir = data_src_dir / 'binary'\n") - f.write(f"data_dest_dir = Path('{data_dir_name}')\n") - f.write("\n") + f.write(f"data_dest_dir = Path('{data_dir_name}')\n\n") f.write("# Configuration\n") - f.write("config_file = conf_dir / 'connection.conf'\n") - f.write("\n") + f.write(f"source_config_file = conf_dir / '{source_config}'\n") + f.write(f"destination_config_file = conf_dir / '{dest_config}'\n\n") f.write("# Declare here all data files\n") if not model: @@ -718,23 +705,58 @@ def create_file_init_map(file: Path) -> None: ) -def scaffold_dir() -> None: +def scaffold_dir( + args: argparse.Namespace, source_config: str, dest_config: str +) -> None: """Create the whole directory structure and the basic project files.""" + # If database is omitted, get the first part of the hostname + if is_remote_host(args.host) and not args.dbname: + dbname = args.host.split(".")[0] + sys.stdout.write(f"Database is set by default to {dbname}.\n") + else: + dbname = args.dbname + + conf_dir = base_dir / conf_dir_name + orig_dir = base_dir / orig_dir_name + conf_dir = base_dir / conf_dir_name + orig_dir = base_dir / orig_dir_name + orig_raw_dir = orig_dir / "binary" + data_dir = base_dir / data_dir_name + log_dir = base_dir / log_dir_name + create_folder(Path(conf_dir)) create_folder(Path(orig_dir)) create_folder(Path(orig_raw_dir)) create_folder(Path(data_dir)) create_folder(Path(log_dir)) - create_connection_file_local(Path(conf_dir) / "connection.conf") - create_connection_file_local(Path(conf_dir) / "connection.local") - create_connection_file_remote( - Path(conf_dir) / "connection.staging", ".dev.odoo.com" + # Create the source connection file + create_connection_file( + conf_dir / source_config, args.host, dbname, args.userid + ) + + create_connection_file( + conf_dir / "local_connection.conf", "localhost", dbname, args.userid + ) + create_connection_file( + conf_dir / "staging_connection.conf", + ".dev.odoo.com", + dbname, + args.userid, ) - create_connection_file_remote( - Path(conf_dir) / "connection.master", ".odoo.com" + create_connection_file( + conf_dir / "prod_connection.conf", ".odoo.com", dbname, args.userid ) + # If a separate destination is specified, create a placeholder for it + if source_config != dest_config: + create_connection_file( + conf_dir / dest_config, + "DESTINATION_HOST", + "DESTINATION_DB", + args.userid, + ) + create_cleanup_script( Path(base_dir) / f"cleanup_data_dir{script_extension}" ) @@ -742,7 +764,7 @@ def scaffold_dir() -> None: create_load_script(Path(base_dir) / f"load{script_extension}") create_file_prefix(Path(base_dir) / "prefix.py") create_file_mapping(Path(base_dir) / "mapping.py") - create_file_files(Path(base_dir) / "files.py") + create_file_files(Path(base_dir) / "files.py", source_config, dest_config) create_file_lib(Path(base_dir) / 
"funclib.py") create_file_clean_data(Path(base_dir) / "clean_data.py") create_file_install_lang(Path(base_dir) / "install_lang.py") @@ -1467,8 +1489,7 @@ def main() -> None: parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, - description=module_descr, - epilog=module_epilog, + description=f"Version: {module_version}\nTool for scaffolding odoo-data-flow projects.", ) parser.add_argument( "-s", @@ -1523,9 +1544,19 @@ def main() -> None: "--config", dest="config", type=Path, - default=Path(conf_dir_name) / "connection.conf", - required=False, - help=f"configuration file (relative to --path) defining the RPC connections parameters (default: {Path(conf_dir_name) / 'connection.conf'})", + help="Default config file if source/destination are not set.", + ) + parser.add_argument( + "--source-config", + dest="source_config", + type=Path, + help="Source Odoo instance for reading metadata.", + ) + parser.add_argument( + "--destination-config", + dest="destination_config", + type=Path, + help="Destination Odoo instance for writing import scripts.", ) parser.add_argument( "-o", @@ -1677,14 +1708,26 @@ def main() -> None: export_fields = args.export_fields # --- Start of Corrected Logic --- - # 1. Parse config file early to get database name if not provided by CLI + + # Determine which config files to use + source_config_name = args.source_config or args.config or "connection.conf" + dest_config_name = ( + args.destination_config or args.config or "dest_connection.conf" + ) + + # Read config file early to get database name if not provided by CLI + # Read dbname from source config for operations that need it + config_path = args.path / source_config_name + dbname = args.dbname if not dbname: - config_path = base_dir / config + config_path = base_dir / source_config_name + print(f"Reading database name from config file {config_path} ...") if config_path.is_file(): cfg = configparser.ConfigParser() cfg.read(str(config_path)) if cfg.has_section("Connection"): dbname = cfg.get("Connection", "database") + print(f"Found database name *{dbname}* from config file.") # 2. 
Now perform actions that require the database name
     if version:
@@ -1711,6 +1754,7 @@ def main() -> None:
         sys.exit(0)
 
     if export_fields:
+        print("export fields called")
         if not model:
             sys.stderr.write(
                 "Error: --export-fields requires -m MODEL to be specified.\n"
@@ -1752,6 +1796,7 @@ def main() -> None:
             )
             scaffold = "Y" == response.upper()
 
+    # If still no action, exit with help message
     if not scaffold and not model:
         sys.stderr.write(
             "You need to set an action with -s|--scaffold or -m|--model or -l|--list\n"
@@ -1759,27 +1804,17 @@ def main() -> None:
         sys.stderr.write(f"Type {module_name} -h|--help for help\n")
         sys.exit(1)
 
-    script_extension = ".cmd" if platform.system() == "Windows" else ".sh"
 
-    # Do cascaded actions
-    if scaffold:
-        if base_dir == default_base_dir:
-            project_name = Path.cwd().name
-        else:
-            project_name = base_dir.name
+    script_extension = ".cmd" if platform.system() == "Windows" else ".sh"
 
-        conf_dir = base_dir / conf_dir_name
-        orig_dir = base_dir / orig_dir_name
-        orig_raw_dir = orig_dir / "binary"
-        data_dir = base_dir / data_dir_name
-        log_dir = base_dir / log_dir_name
+    if args.scaffold:
 
-        # If database is omitted, get the first part of the hostname
         if is_remote_host(host) and not dbname:
             dbname = host.split(".")[0]
             sys.stdout.write(f"Database is set by default to {dbname}.\n")
 
-        scaffold_dir()
+        scaffold_dir(args, str(source_config_name), str(dest_config_name))

From c3582601730cdbeab0408d7d52f61b1d567beaa5 Mon Sep 17 00:00:00 2001
From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me>
Date: Tue, 15 Jul 2025 10:58:43 +0200
Subject: [PATCH 14/24] :memo: Update documentation for data migrations

Document the new `--source-config` and `--destination-config` options.
For consistency, use the `.conf` extension.
---
 README.md | 51 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 32 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index b3778b0..1eaeee0 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ To install `odoo_dataflow_scaffold` from GitHub, use the following command:
 git clone git@github.com:OdooDataFlow/odoo_dataflow_scaffold.git
 ```
 
- 
+
 ```bash
 [sudo] pip install odoo_dataflow_scaffold
 ```
@@ -72,11 +72,24 @@ The main options are:
 * `-t | --host`: Sets the hostname of the database. The default is "localhost".
 * `-u | --userid`: Sets the user ID used by RPC calls. The default is "2".
 
-For more information on project scaffolding, see [Folders Structure and Project Files](#3-folders-structure-and-project-files).
+### 2.1.1. For Data Migrations (Source & Destination DBs)
+
+For data migration scenarios where you need to read from one database and write to another, you can specify separate configuration files.
+
+* `--source-config`: Path to the connection file for the source database (for reading metadata).
+* `--destination-config`: Path to the connection file for the destination database (for generating import scripts).
+
+**Example Usage:**
+```bash
+odoo_dataflow_scaffold.py -s -p my_migration \
+  --source-config conf/odoo12.conf \
+  --destination-config conf/odoo18.conf
+```
+This will create two separate config files in the `conf/` directory. The generated transformation scripts will be pre-configured to use the source config for reading and the destination config for writing the final `load.sh` script.
 
 ## 2.2. 
Generate a model skeleton code -From your project folder, verify the connection parameters in `conf/connection.conf` and generate the skeleton code for a model: +From your project folder, verify the connection parameters in `conf/connection.conf` (or your source/destination files) and generate the skeleton code for a model: ``` odoo_dataflow_scaffold.py -m my.model -a [--map-selection] [--with-xmlid] @@ -109,10 +122,10 @@ The complete set of options is described [here](#4-model-skeleton-codes). src_my_model = os.path.join(data_src_dir, 'my_model.csv') ``` - * Review the python script _my_model.py_. In the generated code, you should at least: - * verify the column names. By default their name is the same as the field, which is probably not correct for all columns of the client file. When the tag 'CSV_COLUMN' appears, you also have to replace it by the right column name, - * apply the right date formats, if any. You always need to replace the tag 'CSV_DATE_FORMAT' with the [directives](https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior) reflecting the field format in your client file, - * comment or remove the fields you don't need. + * Review the python script _my_model.py_. In the generated code, you should at least: + * verify the column names. By default their name is the same as the field, which is probably not correct for all columns of the client file. When the tag 'CSV_COLUMN' appears, you also have to replace it by the right column name, + * apply the right date formats, if any. You always need to replace the tag 'CSV_DATE_FORMAT' with the [directives](https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior) reflecting the field format in your client file, + * comment or remove the fields you don't need. ```python mapping_my_model = { # ID (#2841): stored, optional, readonly, integer @@ -122,6 +135,7 @@ The complete set of options is described [here](#4-model-skeleton-codes). 'company_id/id': mapper.m2o(PREFIX_RES_COMPANY, 'company_id'), # Date of Transfer (#2832): stored, optional, readonly, datetime 'date_done': mapper.val('date_done', postprocess=lambda x: datetime.strptime(x, 'CSV_DATE_FORMAT').strftime('%Y-%m-%d 00:00:00')), + } ``` * By default the delimiter of your CSV file is set to a semicolon ';'. If you use another delimiter you need to change it at the line: @@ -129,10 +143,10 @@ The complete set of options is described [here](#4-model-skeleton-codes). ```python processor = Processor(src_my_model, delimiter=';', preprocess=preprocess_MyModel) ``` - * All other project files are automatically set up. Although it's always advised to review: - * `mapping.py` if you used the option **--map-selection**, - * `prefix.py` if you import boolean fields or if you didn't use the option **--with-xmlid**, - * the transform script `transform.sh` (`transform.cmd` on Windows) and the load script `load.sh` (`load.cmd` on Windows) to be sure that all shell commands you need will be launched. + * All other project files are automatically set up. Although it's always advised to review: + * `mapping.py` if you used the option **--map-selection**, + * `prefix.py` if you import boolean fields or if you didn't use the option **--with-xmlid**, + * the transform script `transform.sh` (`transform.cmd` on Windows) and the load script `load.sh` (`load.cmd` on Windows) to be sure that all shell commands you need will be launched. See the options **[--map-selection](#map-selection)** and **[-a | --append](#append)** for more details. 
@@ -198,9 +212,9 @@ The following directories and files are created:
 
 * `conf/`: This directory contains connection configuration files:
   * `connection.conf`: The default configuration file for RPC calls.
-  * `connection.local`: A preset for importing data into a local database.
-  * `connection.staging`: A preset for importing data into a staging database (uses encrypted connections).
-  * `connection.master`: A preset for importing data into the master database (uses encrypted connections).
+  * `local_connection.conf`: A preset for importing data into a local database.
+  * `staging_connection.conf`: A preset for importing data into a staging database (uses encrypted connections).
+  * `prod_connection.conf`: A preset for importing data into the production database (uses encrypted connections).
 * `origin/`: This directory stores the original client files in CSV format.
 * `origin/binary/`: This directory stores any binary files associated with the client data (e.g., images, documents).
 * `data/`: This directory stores the transformed files ready for import, generated after running the transformation script.
@@ -224,9 +238,9 @@ The following directories and files are created:
 
 The following options further configure the connection files during the scaffolding process:
 
-* When the `-d | --db` and `-t | --host` options are provided, the database name and hostname are stored in both the `connection.local` and `connection.conf` files. The default hostname is `localhost`, and the default database user credentials are `admin/admin`.
+* When the `-d | --db` and `-t | --host` options are provided, the database name and hostname are stored in both the `local_connection.conf` and `connection.conf` files. The default hostname is `localhost`, and the default database user credentials are `admin/admin`.
 * The default user ID for RPC calls is `2`. You can change this using the `-u | --userid` option.
-* The `connection.local` and ` configured to establish encrypted connections when you specify a remote host. Connections remain unencrypted only when the database is located on `localhost`.
+* The `local_connection.conf` and `connection.conf` files are configured to establish encrypted connections when you specify a remote host. Connections remain unencrypted only when the database is located on `localhost`.
 * Scaffolding in an existing path preserves any existing files and directories. To overwrite them, use the `-f | --force` option.
@@ -342,8 +356,7 @@
 res_partner_sale_warn_map = {
     "Warning": 'warning',
     "Blocking Message": 'block',
 }
 ```
 `res_partner.py`
 ```python
 mapping_res_partner = {
     ...
@@ -436,7 +449,7 @@
 odoo_dataflow_scaffold.py -s -p project_dir -d my_db -t my_db.odoo.com -m my.model -a
 ```
 
 It is possible to reduce the command line. If the option **-t | --host** is used without **-d | --db**, the database will be the first part of the hostname. So, this command line is equivalent to the previous one.
 
 ```bash
 odoo_dataflow_scaffold.py -s -p project_dir -t my_db.odoo.com -m my.model -a
 ```

From 64a3e96f6a4d8df614396b0eabb6a8c6925bff76 Mon Sep 17 00:00:00 2001
From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me>
Date: Tue, 15 Jul 2025 11:28:21 +0200
Subject: [PATCH 15/24] :bug: Restore default connection. 
--- odoo_dataflow_scaffold.py | 1 + 1 file changed, 1 insertion(+) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 32368db..ca073ae 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -1544,6 +1544,7 @@ def main() -> None: "--config", dest="config", type=Path, + default=Path(conf_dir_name) / "connection.conf", help="Default config file if source/destination are not set.", ) parser.add_argument( From ce1474775e94b1257452c2a73186f28c5d275139 Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Tue, 15 Jul 2025 11:49:06 +0200 Subject: [PATCH 16/24] [FIX] Fix the bug that no file is written when using --export-fields flag --- odoo_dataflow_scaffold.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index ca073ae..f9a2a1e 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -1767,7 +1767,7 @@ def main() -> None: ) sys.exit(1) - sys.stdout.write("Generating import-compatible fields...\n") + sys.stdout.write("Printing import-compatible fields to console...\n") fields = load_fields() exportable_field_names = [] for f in fields: @@ -1785,10 +1785,6 @@ def main() -> None: exportable_field_names.append(f.name) - sys.stdout.write(",".join(exportable_field_names) + "\n") - sys.stdout.write("Import-compatible fields generated.\n") - sys.exit(0) - # If no action set, prompt for scaffolding action_args = [scaffold, model] if not any(action_args): From 809c40b9971cc42d365a8c5c6dc8c3566572e5b5 Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Tue, 15 Jul 2025 12:30:57 +0200 Subject: [PATCH 17/24] :sparkles: :boom: feat(scaffold): Generate migration-aware transformation scripts This commit continues the refactoring to make the scaffolding tool "migration-aware". The generated Python transformation scripts (`your_model.py`) are now aware of separate source and destination databases. - The `Processor` is now initialized with the `source_config_file` to ensure it reads metadata from the correct source database when necessary. - The `params` dictionary passed to `processor.process()` now includes a `'config'` key pointing to the `destination_config_file`. This ensures that the final `load.sh` script generated by `processor.write_to_file()` will correctly target the destination database for the import. This change depends on a corresponding update to the `odoo-data-flow` library to correctly handle the new `'config'` parameter. 
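Put together, a generated transformation script is expected to look
roughly like this trimmed sketch (`my.model` and the `src_`/`dest_` names
are placeholders resolved via `files.py`; the worker and batch size stand
in for the generated defaults):

```python
# Trimmed sketch of a generated transform script for a placeholder model.
from odoo_data_flow.lib import mapper
from odoo_data_flow.lib.transform import Processor

from files import (src_my_model, dest_my_model,
                   source_config_file, destination_config_file)

mapping_my_model = {
    'id': mapper.val('id'),
}

# Metadata lookups go against the source database...
processor = Processor(src_my_model, config_file=source_config_file, delimiter=';')

# ...while the generated load script targets the destination database.
processor.process(mapping_my_model, dest_my_model,
                  {'config': destination_config_file, 'model': 'my.model',
                   'groupby': '', 'worker': 2, 'batch_size': 100},
                  'set')
processor.write_to_file('my_model.sh')
```
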
--- odoo_dataflow_scaffold.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index f9a2a1e..b22cfaf 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -1005,7 +1005,7 @@ def write_begin(file: io.TextIOWrapper) -> None: file.write("# import odoolib\n") file.write("# from odoo_data_flow.lib import conf_lib\n") file.write( - "# connection = conf_lib.get_connection_from_config(config_file)\n" + "# connection = conf_lib.get_connection_from_config(source_config_file)\n" ) file.write(f"def preprocess_{model_class_name}(header, data):\n") file.write(" # Do nothing\n") @@ -1025,7 +1025,7 @@ def write_begin(file: io.TextIOWrapper) -> None: file.write(" # return header, data_new\n") file.write("\n") file.write( - f"processor = Processor(src_{model_mapped_name}, delimiter='{csv_delimiter}', preprocess=preprocess_{model_class_name})\n" + f"processor = Processor(src_{model_mapped_name}, config_file=source_config_file, delimiter='{csv_delimiter}', preprocess=preprocess_{model_class_name})\n" ) file.write("\n") @@ -1051,7 +1051,7 @@ def write_end(file: io.TextIOWrapper) -> None: ctx = f"'context': {{{', '.join(ctx_opt)}}}, " file.write( - f"processor.process({model_mapping_name}, dest_{model_mapped_name}, {{'model': '{model}', {ctx}'groupby': '', 'worker': DEFAULT_WORKER, 'batch_size': DEFAULT_BATCH_SIZE}}, 'set')\n\n" + f"processor.process({model_mapping_name}, dest_{model_mapped_name}, {{'config': destination_config_file, 'model': '{model}', {ctx}'groupby': '', 'worker': DEFAULT_WORKER, 'batch_size': DEFAULT_BATCH_SIZE}}, 'set')\n\n" ) file.write( f"processor.write_to_file('{model_mapped_name}{script_extension}', python_exe='{default_python_exe}', path='{default_path}')\n\n" From 60713c67d17da00cce4d1ac5af188df4099e8923 Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Tue, 15 Jul 2025 15:54:08 +0200 Subject: [PATCH 18/24] :bug: fix(scaffold): Use source config for export script generation The `--create-export-script` command was previously using a hardcoded path (`conf/connection.conf`) instead of respecting the new migration-aware configuration flags. This caused the generated export script to point to the wrong database in a migration scenario. This commit fixes the bug by: 1. Updating the `create_export_script_file` function to accept the source config file path as an argument. 2. Passing the correct source config path from the `main` function. 3. Using this dynamic path to generate the correct `--config` flag in the export script. The generated export script now correctly uses the `--source-config` file, allowing it to pull data from the intended source database. 
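With this fix, the generated export script follows the chosen source
config; its expected content is sketched below (the config path, model,
and fields are placeholders):

```bash
#!/usr/bin/env bash

# Sketch of a generated export script; all values are placeholders.
odoo-data-flow export \
    --config "conf/odoo12.conf" \
    --model "res.partner" \
    --file "origin/res_partner.csv" \
    --fields "id,name,email" \
    --technical-names
```
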
---
 odoo_dataflow_scaffold.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py
index b22cfaf..b485860 100755
--- a/odoo_dataflow_scaffold.py
+++ b/odoo_dataflow_scaffold.py
@@ -1333,7 +1333,11 @@ def show_version() -> None:
 
 
 def create_export_script_file(
-    model: str, model_mapped_name: str, outfile: Path, script_extension: str
+    model: str,
+    model_mapped_name: str,
+    outfile: Path,
+    script_extension: str,
+    source_config: str,
 ) -> None:
     """Create a shell script to export data based on the generated mapper."""
     mapper_file_name = f"{model_mapped_name}.py"
@@ -1398,7 +1402,7 @@ def create_export_script_file(
         if platform.system() != "Windows":
             f.write("#!/usr/bin/env bash\n\n")
             f.write("odoo-data-flow export \\\n")
-            f.write("    --config conf/connection.conf \\\n")
+            f.write(f'    --config "{source_config}" \\\n')
             f.write(f'    --model "{model}" \\\n')
             f.write(f'    --file "origin/{model_mapped_name}.csv" \\\n')
             f.write(f'    --fields "{",".join(field_names)}" \\\n')
@@ -1750,7 +1754,11 @@ def main() -> None:
             )
             sys.exit(1)
         create_export_script_file(
-            model, model_mapped_name, outfile, script_extension
+            model,
+            model_mapped_name,
+            outfile,
+            script_extension,
+            source_config_name,
         )
         sys.exit(0)

From 257d797ebb0d4ec54f2a4d38af72df011f248aaa Mon Sep 17 00:00:00 2001
From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me>
Date: Tue, 15 Jul 2025 22:13:05 +0200
Subject: [PATCH 19/24] :zap: Export image fields only once

Performance can be increased by not exporting all of the resized
image fields.
The resulting file size will also be a lot smaller by excluding them.
---
 odoo_dataflow_scaffold.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py
index b485860..9a12ebd 100755
--- a/odoo_dataflow_scaffold.py
+++ b/odoo_dataflow_scaffold.py
@@ -1782,6 +1782,15 @@ def main() -> None:
             # Exclude fields not suitable for direct import
             if f.name in ("create_uid", "write_uid"):
                 continue
+            if f.name in (
+                "image_small",
+                "image_medium",
+                "image_128",
+                "image_256",
+                "image_512",
+                "image_1024",
+            ):
+                continue
             if f.compute:
                 continue
             if f.related:

From f3cd51af1233cd9d583ab4f331b1da04a8de3aa8 Mon Sep 17 00:00:00 2001
From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me>
Date: Wed, 16 Jul 2025 14:32:58 +0200
Subject: [PATCH 20/24] :boom: Update export to new syntax.

--file is deprecated for export commands.
Use `--output` now.
---
 odoo_dataflow_scaffold.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py
index 9a12ebd..657306c 100755
--- a/odoo_dataflow_scaffold.py
+++ b/odoo_dataflow_scaffold.py
@@ -1404,7 +1404,7 @@ def create_export_script_file(
             f.write("odoo-data-flow export \\\n")
             f.write(f'    --config "{source_config}" \\\n')
             f.write(f'    --model "{model}" \\\n')
-            f.write(f'    --file "origin/{model_mapped_name}.csv" \\\n')
+            f.write(f'    --output "origin/{model_mapped_name}.csv" \\\n')
             f.write(f'    --fields "{",".join(field_names)}" \\\n')
             f.write("    --technical-names\n")

From 56798faaec9eb915e944a1e9181b1a53e30b1dd0 Mon Sep 17 00:00:00 2001
From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me>
Date: Wed, 16 Jul 2025 16:59:34 +0200
Subject: [PATCH 21/24] :sparkles: :zap: feat(scaffold): Generate Polars schema
 to optimize performance

The scaffold tool now automatically generates a Polars schema
dictionary (`dtypes`) in the model transformation scripts it creates. 
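For a hypothetical partner mapper, the generated dictionary would look
roughly like this (field names are placeholders; the type mapping follows
the `odoo_type_to_polars_type` helper added below):

```python
import polars as pl

# Sketch of a generated schema for a hypothetical res_partner mapper.
res_partner_schema = {
    'id': pl.Int64,              # integer
    'name': pl.Utf8,             # char
    'active': pl.Boolean,        # boolean
    'create_date': pl.Datetime,  # datetime
}
```
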
This schema is built by inspecting the field types in the source Odoo
database. By providing this schema to the `Processor`, the CSV reading
step can skip the slow type-inference process, resulting in a
significant performance improvement for large files.

Additionally, the generated `transform.sh` script now includes
`export POLARS_MAX_THREADS` to ensure that Polars can leverage all
available CPU cores for transformations, further boosting performance.
---
 odoo_dataflow_scaffold.py | 42 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 40 insertions(+), 2 deletions(-)

diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py
index 657306c..8aad95e 100755
--- a/odoo_dataflow_scaffold.py
+++ b/odoo_dataflow_scaffold.py
@@ -186,6 +186,8 @@ def create_transform_script(file: Path) -> None:
     else:
         with file.open("w", encoding="utf-8") as f:
             f.write("#!/usr/bin/env bash\n\n")
+            f.write("# Use all available CPU cores for Polars operations\n")
+            f.write("export POLARS_MAX_THREADS=$(nproc)\n\n")
             f.write(f"LOGDIR={log_dir_name}\n")
             f.write(f"DATADIR={data_dir_name}\n\n")
             f.write("COLOR='\\033[1;32m'\n")
@@ -992,6 +994,7 @@ def write_begin(file: io.TextIOWrapper) -> None:
         file: The file object to write to.
     """
     file.write("# -*- coding: utf-8 -*-\n")
+    file.write("import polars as pl\n")
     file.write("\n")
     file.write("from odoo_data_flow.lib import mapper\n")
     file.write("from odoo_data_flow.lib.transform import Processor\n")
@@ -1025,11 +1028,37 @@ def write_begin(file: io.TextIOWrapper) -> None:
     file.write("    # return header, data_new\n")
     file.write("\n")
     file.write(
-        f"processor = Processor(src_{model_mapped_name}, config_file=source_config_file, delimiter='{csv_delimiter}', preprocess=preprocess_{model_class_name})\n"
+        f"processor = Processor(src_{model_mapped_name}, config_file=source_config_file, dtypes={model_mapped_name}_schema, delimiter='{csv_delimiter}', preprocess=preprocess_{model_class_name})\n\n"
     )
     file.write("\n")
 
 
+def odoo_type_to_polars_type(odoo_type: str) -> str:
+    """Map Odoo field types to Polars DataType strings."""
+    mapping = {
+        "char": "pl.Utf8",
+        "text": "pl.Utf8",
+        "html": "pl.Utf8",
+        "selection": "pl.Utf8",
+        "monetary": "pl.Float64",
+        "float": "pl.Float64",
+        "integer": "pl.Int64",
+        "boolean": "pl.Boolean",
+        "date": "pl.Date",
+        "datetime": "pl.Datetime",
+        "many2one": "pl.Utf8",
+        "many2one_reference": "pl.Int64",
+        "many2many": "pl.Utf8",
+        "binary": "pl.Utf8",
+        "json": "pl.Utf8",
+        "one2many": "pl.Utf8",
+        "properties": "pl.Utf8",
+        "properties_definition": "pl.Utf8",
+        "reference": "pl.Utf8",
+    }
+    return mapping.get(odoo_type, "pl.Utf8")
+
+
 def write_end(file: io.TextIOWrapper) -> None:
     """Write the end of the generated python script.
 
@@ -1059,13 +1088,14 @@ def write_end(file: io.TextIOWrapper) -> None:
 
 
 def write_mapping(file: io.TextIOWrapper) -> None:
-    """Write the fields mapping of the generated python script.
+    """Write the fields mapping and schema of the generated python script.
 
     Args:
         file: The file object to write to. 
""" if not dbname or offline: file.write(f"{model_mapping_name} = {{\n 'id': None,\n}}\n\n") + file.write(f"{model_mapped_name}_schema = None\n\n") return fields = load_fields() @@ -1092,6 +1122,14 @@ def field_filter(f: ModelField) -> bool: fields, key=lambda f: ((f.name != "id"), not f.is_required(), f.name) ) + if skeleton in ("dict", "map"): + # --- Write the Schema Dictionary --- + file.write(f"{model_mapped_name}_schema = {{\n") + for f in fields: + polars_type = odoo_type_to_polars_type(f.type) + file.write(f" '{f.get_mapping_name()}': {polars_type},\n") + file.write("}\n\n") + if skeleton == "dict": file.write(f"{model_mapping_name} = {{\n") for f in fields: From ef6a6a899ce3d1d81d881d0e006ac8a0d21ee797 Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Wed, 16 Jul 2025 16:59:34 +0200 Subject: [PATCH 22/24] :sparkles: :zap: feat(scaffold): Generate Polars schema to optimize performance The scaffold tool now automatically generates a Polars schema dictionary (`dtypes`) in the model transformation scripts it creates. This schema is built by inspecting the field types in the source Odoo database. By providing this schema to the `Processor`, the CSV reading step can skip the slow type-inference process, resulting in a significant performance improvement for large files. Additionally, the generated `transform.sh` script now includes `export POLARS_MAX_THREADS` to ensure that Polars can leverage all available CPU cores for transformations, further boosting performance. --- odoo_dataflow_scaffold.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 8aad95e..d91e5ee 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -1028,7 +1028,7 @@ def write_begin(file: io.TextIOWrapper) -> None: file.write(" # return header, data_new\n") file.write("\n") file.write( - f"processor = Processor(src_{model_mapped_name}, dtypes={model_mapped_name}_schema, delimiter='{csv_delimiter}, preprocess=preprocess_{model_class_name}')\n\n" + f"processor = Processor(src_{model_mapped_name}, schema_overrides={model_mapped_name}_schema, delimiter='{csv_delimiter}, preprocess=preprocess_{model_class_name}')\n\n" ) file.write("\n") From b576a0980a0f55cc2786d8cd5822903492fec65e Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Wed, 16 Jul 2025 21:39:33 +0200 Subject: [PATCH 23/24] :bug: Fix field names in export shell script. Make sure the fields only contains a list of clean field names. 
--- odoo_dataflow_scaffold.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index d91e5ee..781b359 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -6,6 +6,7 @@ import io import os import platform +import re import socket import sys from pathlib import Path @@ -1406,27 +1407,33 @@ def create_export_script_file( mapping_end = mapper_content.find("}\n\n", mapping_start) if mapping_end == -1: - sys.stderr.write( - f"Error: Could not find end of mapping dictionary in {mapper_file_path}.\n" - ) - sys.exit(1) + # Fallback for dictionaries that might end differently + mapping_end = mapper_content.find("\n}", mapping_start) + if mapping_end != -1: + mapping_end += 2 # include the closing brace + else: + sys.stderr.write( + f"Error: Could not find end of mapping dictionary in {mapper_file_path}.\n" + ) + sys.exit(1) mapping_dict_str = mapper_content[ mapping_start + len(f"{mapping_var_name} = {{") : mapping_end ] + # Use a regular expression to reliably extract field names (dictionary keys) + # This pattern looks for a quoted string (the key) followed by a colon. + key_pattern = re.compile(r"^\s*['\"]([^'\"]+)['\"]\s*:") + for line in mapping_dict_str.splitlines(): line = line.strip() if line.startswith("#") or not line: continue - # Extract the key (field name) - try: - field_name = line.split(":")[0].strip().strip("'\"") - if field_name: - field_names.append(field_name) - except IndexError: - continue + match = key_pattern.match(line) + if match: + field_name = match.group(1) + field_names.append(field_name) if not field_names: sys.stderr.write( From 183397990e03273c70f7c73339924ab24c589619 Mon Sep 17 00:00:00 2001 From: bosd <5e2fd43-d292-4c90-9d1f-74ff3436329a@anonaddy.me> Date: Thu, 17 Jul 2025 12:59:48 +0200 Subject: [PATCH 24/24] :sparkles:feat(rpc): Add fallbacks for restricted model introspection The script would crash with an `odoo.exceptions.AccessError` when run by a user without administrative privileges. This was because it directly queried protected administrative models (`ir.model.fields`, `ir.model`, `ir.model.data`) to gather metadata for scaffolding. This commit introduces several fallback mechanisms to make the process more resilient and usable in environments with restricted permissions. The script now gracefully degrades functionality when access is denied, rather than crashing. The changes include: 1. **`load_fields()`**: Now attempts to fetch field data via `ir.model.fields` and, upon failure, falls back to a new `load_fields_from_model` function. This fallback uses the `fields_get()` method on the target model itself, which typically has less restrictive access. 2. **`model_exists()`**: No longer queries `ir.model`. It now confirms a model's existence and accessibility by attempting to retrieve the model proxy and perform a lightweight `fields_get()` call, all within a `try...except` block. 3. **`ModelField.get_info()`**: The lookup for related XML-IDs, which queries `ir.model.data`, is now wrapped in a `try...except` block. This makes the helpful feature an optional enhancement that is gracefully skipped if permissions are insufficient. 
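The degradation pattern boils down to the following sketch (`connection`
is any odoolib-style proxy factory; `get_model_fields` is an illustrative
name, not the script's API):

```python
def get_model_fields(connection, model_name):
    """Read field metadata, degrading gracefully on restricted access."""
    try:
        # Preferred path: full metadata via ir.model.fields (needs access rights).
        imf = connection.get_model("ir.model.fields")
        ids = imf.search([("model", "=", model_name)])
        if not ids:
            raise ValueError(f"no fields found for {model_name}")
        return imf.read(ids)
    except Exception:
        # Fallback: fields_get() on the model itself, usually less restricted.
        proxy = connection.get_model(model_name)
        return [
            {"name": name, "ttype": props.get("type"), **props}
            for name, props in proxy.fields_get().items()
        ]
```
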
--- odoo_dataflow_scaffold.py | 122 +++++++++++++++++++++++++++++++------- 1 file changed, 99 insertions(+), 23 deletions(-) diff --git a/odoo_dataflow_scaffold.py b/odoo_dataflow_scaffold.py index 781b359..04caa91 100755 --- a/odoo_dataflow_scaffold.py +++ b/odoo_dataflow_scaffold.py @@ -890,15 +890,21 @@ def get_info(self) -> str: if self.relation: info += f" -> {self.relation}" - # Add XMLID summary in field info - model_data = self.connection.get_model("ir.model.data") - external_prefixes = model_data.read_group( - [("model", "=", self.relation)], ["module"], ["module"] - ) - if external_prefixes: - info += " - with xml_id in module(s):" - for data in external_prefixes: - info += f" {data['module']}({data['module_count']})" + + # Wrap the ir.model.data access in a try...except block to handle AccessErrors. + try: + # Add XMLID summary in field info + model_data = self.connection.get_model("ir.model.data") + external_prefixes = model_data.read_group( + [("model", "=", self.relation)], ["module"], ["module"] + ) + if external_prefixes: + info += " - with xml_id in module(s):" + for data in external_prefixes: + info += f" {data['module']}({data['module_count']})" + except Exception: + # If we can't access ir.model.data, just skip this enhancement. + pass if self.selection: info += f"\n # SELECTION: {', '.join(self.selection)}" @@ -965,29 +971,90 @@ def is_required(self) -> bool: return self.required and not self.default_value -def load_fields() -> List[ModelField]: - """Build the model fields list, fetched as defined in the target database. +def load_fields_from_model() -> List[ModelField]: + """ + Fallback method to build model fields list by calling fields_get() on the model itself. + This avoids requiring direct access to 'ir.model.fields'. Returns: A list of ModelField objects. """ global has_tracked_fields global has_computed_fields + sys.stdout.write( + "INFO: Access to 'ir.model.fields' failed. Using fallback method 'fields_get()'.\n" + ) has_tracked_fields, has_computed_fields = False, False connection = conf_lib.get_connection_from_config(config) - model_fields = connection.get_model("ir.model.fields") - field_ids = model_fields.search([("model", "=", model)]) - fields = model_fields.read(field_ids) + try: + model_proxy = connection.get_model(model) + fields_properties = model_proxy.fields_get() + except Exception as e: + sys.stderr.write( + f"FATAL: Fallback method also failed. Could not get fields for model '{model}'. Error: {e}\n" + ) + sys.exit(1) + ret = [] - for field in fields: - f = ModelField(connection, field) + for field_name, properties in fields_properties.items(): + # The output of fields_get() is slightly different from a read on ir.model.fields. + # We need to map the keys to what the ModelField class expects. 

+        field_data = {
+            "id": 0,  # Not available via this method
+            "name": field_name,
+            "ttype": properties.get("type"),
+            "required": properties.get("required"),
+            "readonly": properties.get("readonly"),
+            "field_description": properties.get("string"),
+            "store": properties.get("store", True),
+            "track_visibility": properties.get("tracking"),
+            "related": properties.get("related"),
+            "relation": properties.get("relation"),
+            "depends": properties.get("depends"),
+            "compute": properties.get("compute"),
+            "selection": properties.get("selection"),
+        }
+        f = ModelField(connection, field_data)
         has_tracked_fields = has_tracked_fields or bool(f.track_visibility)
-        has_computed_fields = has_computed_fields or len(f.compute) > 1
+        has_computed_fields = has_computed_fields or bool(f.compute)
         ret.append(f)
     return ret
 
 
+def load_fields() -> List[ModelField]:
+    """Build the model fields list, fetched as defined in the target database.
+
+    Returns:
+        A list of ModelField objects.
+    """
+    global has_tracked_fields
+    global has_computed_fields
+    has_tracked_fields, has_computed_fields = False, False
+    connection = conf_lib.get_connection_from_config(config)
+
+    try:
+        # Original method: try to access ir.model.fields
+        model_fields = connection.get_model("ir.model.fields")
+        field_ids = model_fields.search([("model", "=", model)])
+        if not field_ids:
+            raise ValueError(
+                f"No fields found for model '{model}' via ir.model.fields."
+            )
+        fields = model_fields.read(field_ids)
+
+        ret = []
+        for field in fields:
+            f = ModelField(connection, field)
+            has_tracked_fields = has_tracked_fields or bool(f.track_visibility)
+            has_computed_fields = has_computed_fields or len(f.compute) > 1
+            ret.append(f)
+        return ret
+    except Exception:
+        # Fallback method: if the above fails, try the alternative
+        return load_fields_from_model()
+
+
 def write_begin(file: io.TextIOWrapper) -> None:
     """Write the beginning of the generated python script.
 
@@ -1202,8 +1269,11 @@ def field_filter(f: ModelField) -> bool:
 
 
 def model_exists(model: str) -> bool:
-    """Return True if 'model' is scaffoldable.
+    """Model existence check.
 
+    Return True if 'model' exists and is accessible by the current user.
+    This check does not require access rights to 'ir.model'.
+
     Args:
         model: The model name to check.
 
@@ -1211,11 +1281,17 @@ def model_exists(model: str) -> bool:
         True if the model exists, False otherwise.
     """
     connection = conf_lib.get_connection_from_config(config)
-    model_model = connection.get_model("ir.model")
-    res = model_model.search_count(
-        [("model", "=", model), ("transient", "=", False)]
-    )
-    return res != 0
+    try:
+        # Get a proxy for the model directly.
+        model_proxy = connection.get_model(model)
+        # Perform a lightweight, safe operation to confirm it's real and accessible.
+        # Calling fields_get with a limited attribute set is very efficient.
+        model_proxy.fields_get(attributes=["id"])
+        return True
+    except Exception:
+        # This will catch any error, either from the model not existing
+        # or from the user not having access rights to it.
+        return False