diff --git a/.bumpversion.cfg b/.bumpversion.cfg index ddff031..484a2e9 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.2.2 +current_version = 0.3.0 commit = True tag = True diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..4f3c5ae --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,22 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version, and other tools you might need +build: + os: ubuntu-24.04 + tools: + python: "3.13" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/conf.py + +python: + install: + - method: pip + path: . + - requirements: docs/requirements.txt + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..0e4580b --- /dev/null +++ b/Makefile @@ -0,0 +1,10 @@ +.PHONY: docs tests + +TESTS_PATH?=tests + +docs: + rm -rf docs/_build && $(MAKE) -C docs html + +tests: + PYTHONPATH=. 
pytest -s -vvvv -x $(TESTS_PATH) + diff --git a/README.md b/README.md index c1e22c5..92192ab 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ [![PyPI - Version](https://img.shields.io/pypi/v/sqlalchemy-memory)](https://pypi.org/project/sqlalchemy-memory/) [![CI](https://github.com/rundef/sqlalchemy-memory/actions/workflows/ci.yml/badge.svg)](https://github.com/rundef/sqlalchemy-memory/actions/workflows/ci.yml) +[![Documentation](https://app.readthedocs.org/projects/sqlalchemy-memory/badge/?version=latest)](https://sqlalchemy-memory.readthedocs.io/en/latest/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/sqlalchemy-memory)](https://pypistats.org/packages/sqlalchemy-memory) @@ -34,6 +35,7 @@ Data is kept purely in RAM and is **volatile**: it is **not persisted across app - **SQLAlchemy 2.0 support**: ORM & Core expressions, sync & async modes - **Zero I/O overhead**: pure in‑RAM storage (`dict`/`list` under the hood) - **Commit/rollback support** +- **Index support**: indexes are recognized and used for faster lookups - **Merge and `get()` support**: like real SQLAlchemy behavior ## Installation @@ -42,104 +44,10 @@ Data is kept purely in RAM and is **volatile**: it is **not persisted across app pip install sqlalchemy-memory ``` -## Quickstart +## Documentation -```python -from sqlalchemy import create_engine, select -from sqlalchemy.orm import sessionmaker, declarative_base, Mapped, mapped_column -from sqlalchemy_memory import MemorySession +[See the official documentation for usage examples](https://sqlalchemy-memory.readthedocs.io/en/latest/) -engine = create_engine("memory://") -Session = sessionmaker( - engine, - class_=MemorySession, - expire_on_commit=False, -) - -Base = declarative_base() - -class Item(Base): - __tablename__ = "items" - id: Mapped[int] = mapped_column(primary_key=True) - name: Mapped[str] = mapped_column() - def __repr__(self): - return f"Item(id={self.id} name={self.name})" - -Base.metadata.create_all(engine) - -# Use just like 
any other SQLAlchemy engine: -session = Session() - -# Add & commit -item = Item(id=1, name="foo") -session.add(item) -session.commit() - -# Query (no SQL under the hood: objects come straight back) -items = session.scalars(select(Item)).all() -print("Items", items) -assert items[0] is item -assert items[0].name == "foo" - -# Delete & commit -session.delete(item) -session.commit() - -# Confirm gone -assert session.scalars(select(Item)).all() == [] -``` - -## Quickstart (async) - -```python -import asyncio -from sqlalchemy import select -from sqlalchemy.ext.asyncio import create_async_engine -from sqlalchemy.orm import sessionmaker, declarative_base, Mapped, mapped_column -from sqlalchemy_memory import MemorySession, AsyncMemorySession - -engine = create_async_engine("memory+asyncio://") -Session = sessionmaker( - engine, - class_=AsyncMemorySession, - sync_session_class=MemorySession, - expire_on_commit=False, -) - -Base = declarative_base() - -class Item(Base): - __tablename__ = "items" - id: Mapped[int] = mapped_column(primary_key=True) - name: Mapped[str] = mapped_column() - - def __repr__(self): - return f"Item(id={self.id} name={self.name})" - -Base.metadata.create_all(engine.sync_engine) - -async def main(): - async with Session() as session: - # Add & commit - item = Item(id=1, name="foo") - session.add(item) - await session.commit() - - # Query (no SQL under the hood: objects come straight back) - items = (await session.scalars(select(Item))).all() - print("Items", items) - assert items[0] is item - assert items[0].name == "foo" - - # Delete & commit - await session.delete(item) - await session.commit() - - # Confirm gone - assert (await session.scalars(select(Item))).all() == [] - -asyncio.run(main()) -``` ## Status @@ -155,11 +63,13 @@ Coming soon: - Joins and relationships (limited) +- Compound indexes + - Better expression support in `update(...).values()` (e.g., +=) ## Testing -Simply run `pytest` +Simply run `make tests` ## License diff --git 
a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/commit_rollback.rst b/docs/commit_rollback.rst new file mode 100644 index 0000000..e55f30b --- /dev/null +++ b/docs/commit_rollback.rst @@ -0,0 +1,38 @@ +Commit / Rollback +================= + +sqlalchemy-memory fully supports transactional behavior, including commit and rollback operations. Changes are staged until committed, and can be safely reverted using rollback(). + +Commit +------ + +.. code-block:: python + + with SessionFactory() as session: + session.add(Item(id=1, name="foo")) + session.commit() + + item = session.get(Item, 1) + print(item.name) # foo + item.name = "updated" + session.commit() + + print(item.name) # updated + +Rollback +-------- + +Use `rollback()` to undo uncommitted changes: + +.. code-block:: python + + with SessionFactory() as session: + session.add(Item(id=1, name="foo")) + session.commit() + + item = session.get(Item, 1) + print(item.name) # foo + item.name = "updated" + session.rollback() + + print(item.name) # foo diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..d9d719c --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,53 @@ +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html +import os +import sys +sys.path.insert(0, os.path.abspath('..')) + + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = 'sqlalchemy_memory' +copyright = '2025, rundef' +author = 'rundef' + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", # for Google/NumPy-style docstrings + "sphinx.ext.viewcode", + "sphinx.ext.autosummary", +] +#autosummary_generate = True + +templates_path = ['_templates'] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + + + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +#html_theme = 'furo' +#html_static_path = ['_static'] + + +html_theme = 'sphinx_rtd_theme' +html_theme_options = { + 'logo_only': False, + 'prev_next_buttons_location': 'bottom', + # Toc options + 'collapse_navigation': True, + 'sticky_navigation': True, + 'navigation_depth': 4, + 'includehidden': True, + 'titles_only': False, + 'navigation_with_keys': True, + 'style_external_links': True, +} + diff --git a/docs/delete.rst b/docs/delete.rst new file mode 100644 index 0000000..4fa2bbb --- /dev/null +++ b/docs/delete.rst @@ -0,0 +1,12 @@ +Delete +====== + +Deleting objects from the memory store uses standard SQLAlchemy syntax. + +.. 
code-block:: python + + session.delete(obj) # orm style + + stmt = delete(Item).where(Item.id < 3) # core style + result = session.execute(stmt) + print(result.rowcount) diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..cfb4bca --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,115 @@ +Welcome to sqlalchemy-memory's documentation! +============================================= + +`sqlalchemy-memory` is a pure in-memory backend for SQLAlchemy 2.0 that supports both sync and async modes, with full compatibility for SQLAlchemy Core and ORM. + +Quickstart: sync example +------------------------ + +.. code-block:: python + + from sqlalchemy import create_engine, select + from sqlalchemy.orm import sessionmaker, declarative_base, Mapped, mapped_column + from sqlalchemy_memory import MemorySession + + engine = create_engine("memory://") + Session = sessionmaker( + engine, + class_=MemorySession, + expire_on_commit=False, + ) + + Base = declarative_base() + + class Item(Base): + __tablename__ = "items" + id: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column() + def __repr__(self): + return f"Item(id={self.id} name={self.name})" + + Base.metadata.create_all(engine) + + # Use just like any other SQLAlchemy engine: + session = Session() + + # Add & commit + item = Item(id=1, name="foo") + session.add(item) + session.commit() + + # Query (no SQL under the hood: objects come straight back) + items = session.scalars(select(Item)).all() + print("Items", items) + assert items[0] is item + assert items[0].name == "foo" + + # Delete & commit + session.delete(item) + session.commit() + + # Confirm gone + assert session.scalars(select(Item)).all() == [] + +Quickstart: async example +------------------------- + +.. 
code-block:: python + + import asyncio + from sqlalchemy import select + from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy.orm import sessionmaker, declarative_base, Mapped, mapped_column + from sqlalchemy_memory import MemorySession, AsyncMemorySession + + engine = create_async_engine("memory+asyncio://") + Session = sessionmaker( + engine, + class_=AsyncMemorySession, + sync_session_class=MemorySession, + expire_on_commit=False, + ) + + Base = declarative_base() + + class Item(Base): + __tablename__ = "items" + id: Mapped[int] = mapped_column(primary_key=True) + name: Mapped[str] = mapped_column() + + def __repr__(self): + return f"Item(id={self.id} name={self.name})" + + Base.metadata.create_all(engine.sync_engine) + + async def main(): + async with Session() as session: + # Add & commit + item = Item(id=1, name="foo") + session.add(item) + await session.commit() + + # Query (no SQL under the hood: objects come straight back) + items = (await session.scalars(select(Item))).all() + print("Items", items) + assert items[0] is item + assert items[0].name == "foo" + + # Delete & commit + await session.delete(item) + await session.commit() + + # Confirm gone + assert (await session.scalars(select(Item))).all() == [] + + asyncio.run(main()) + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + insert + query + update + delete + commit_rollback \ No newline at end of file diff --git a/docs/insert.rst b/docs/insert.rst new file mode 100644 index 0000000..7a6fe4f --- /dev/null +++ b/docs/insert.rst @@ -0,0 +1,69 @@ +Insert / Add +============ + +Inserting objects into the memory store uses standard SQLAlchemy syntax. + +.. 
code-block:: python + + # orm style + session.add(Item(id=1, name="Hello")) + + session.add_all([ + Item(id=1, name="Hello"), + Item(id=2, name="World") + ]) + + # core style + result = session.execute(insert(Item).values(name="Hello")) + print(result.rowcount) + + result = session.execute( + insert(Item), + [dict(name="Hello"), dict(name="World")] + ) + print(result.rowcount) + +You can also use `session.add_all([...])` for bulk inserts. + +Primary Keys +------------ + +Auto-incrementing primary keys are fully supported for integer columns. You can also manually specify primary keys when needed. + +.. code-block:: python + + with SessionFactory() as session: + session.add(Item(name="foo")) # Auto-assigned id = 1 + session.add(Item(id=90, name="bar")) # Manually set id = 90 + session.add(Item(name="foobar")) # Auto-assigned id = 91 + session.commit() + + items = session.scalars(select(Item)).all() + results = {item.id: item.name for item in items} + print(results) # {1: 'foo', 90: 'bar', 91: 'foobar'} + +Relationships +------------- + +`sqlalchemy-memory` does not implement automatic behavior for relationships. You are responsible for managing relationship fields manually, such as setting foreign keys and related objects. + +Note that while the `relationship()` declaration is allowed (and used by the ORM), relationship loading, cascades, and lazy-loading behavior are **not implemented**. + +.. code-block:: python + + class Product(Base): + __tablename__ = "products" + + id = Column(Integer, primary_key=True) + category_id = Column(Integer, ForeignKey("categories.id")) + + category = relationship("Category") + + with SessionFactory() as session: + session.add(Product(name="foo", category_id=2, category=category)) + + +Constraints and Validation +-------------------------- + +Constraints such as unique, nullable, or custom check conditions declared on the model are not enforced by the memory store. No validation errors will be raised for constraint violations. 
\ No newline at end of file diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..32bb245 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/query.rst b/docs/query.rst new file mode 100644 index 0000000..a972800 --- /dev/null +++ b/docs/query.rst @@ -0,0 +1,26 @@ +Query +===== + +Supported Operators +------------------- + +- Comparison: `==`, `!=`, `<`, `>`, `<=`, `>=`, `between`, `not between` +- Membership: `in`, `not in` +- Identity: `is`, `is not` +- Logical: `and`, `or` +- String: `like`, `not like` + +Supported Functions +------------------- + +- `DATE(column)` +- `func.json_extract(col, '$.expr')` + +Indexes +------- + +- Indexes are supported for single columns. + +- Compound (multi-column) indexes are not supported. + +**SELECT** queries are optimized using available indexes to speed up equality and range-based lookups. 
\ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..6afd0fd --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,3 @@ +sphinx>=6.0 +sphinx-rtd-theme>=1.0 +sphinx-autodoc-typehints>=1.24 diff --git a/docs/update.rst b/docs/update.rst new file mode 100644 index 0000000..9ae1f4f --- /dev/null +++ b/docs/update.rst @@ -0,0 +1,27 @@ +Update +====== + +Updates in `sqlalchemy-memory` follow standard SQLAlchemy semantics. You can modify objects directly via the ORM or use `update()` expressions with `session.execute()`. + +.. code-block:: python + + with SessionFactory.begin() as session: + session.add(Item(id=1, name="foo")) + session.commit() + + item = session.get(Item, 1) + item.name = "updated" + + with SessionFactory() as session: + item = session.get(Item, 1) + print(item.name) # updated + + session.execute( + update(Item) + .where(Item.id == 1) + .values(name="bar") + ) + session.commit() + + item = session.get(Item, 1) + print(item.name) # bar diff --git a/pyproject.toml b/pyproject.toml index 286b33f..cca6509 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,9 +4,10 @@ build-backend = "setuptools.build_meta" [project] name = "sqlalchemy-memory" -version = "0.2.2" +version = "0.3.0" dependencies = [ "sqlalchemy>=2.0,<3.0", + "sortedcontainers>=2.4.0" ] description = "In-memory SQLAlchemy 2.0 dialect for blazing‑fast prototyping." 
readme = "README.md" diff --git a/pytest.ini b/pytest.ini index 2f4c80e..3e7cf96 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,2 +1,6 @@ [pytest] asyncio_mode = auto +log_cli = true +log_cli_level = DEBUG +log_format = %(asctime)s [%(levelname)s] %(message)s +log_date_format = %H:%M:%S diff --git a/requirements.txt b/requirements.txt index 5004478..9893c14 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,2 @@ sqlalchemy>=2.0,<3.0 +sortedcontainers>=2.4.0 diff --git a/sqlalchemy_memory/__init__.py b/sqlalchemy_memory/__init__.py index 0712826..d1b3d78 100644 --- a/sqlalchemy_memory/__init__.py +++ b/sqlalchemy_memory/__init__.py @@ -6,4 +6,4 @@ "AsyncMemorySession", ] -__version__ = '0.2.2' \ No newline at end of file +__version__ = '0.3.0' \ No newline at end of file diff --git a/sqlalchemy_memory/base/dialect.py b/sqlalchemy_memory/base/dialect.py index 9486af8..4ff9454 100644 --- a/sqlalchemy_memory/base/dialect.py +++ b/sqlalchemy_memory/base/dialect.py @@ -1,8 +1,19 @@ from sqlalchemy.engine import URL, default +from sqlalchemy import event +from sqlalchemy.orm import Mapper import types - +import contextvars from .connection import MemoryDBAPIConnection from .store import InMemoryStore +from ..logger import logger + +_current_store = contextvars.ContextVar("current_store") + +def set_current_store(store): + _current_store.set(store) + +def get_current_store(): + return _current_store.get() class MemoryDialect(default.DefaultDialect): name = "memory" @@ -16,6 +27,27 @@ class MemoryDialect(default.DefaultDialect): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._store = InMemoryStore() + set_current_store(self._store) + + @event.listens_for(Mapper, "mapper_configured") + def auto_attach_tracking(_, class_): + logger.debug(f"Attaching tracking to class {class_}") + + for column in class_.__table__.columns: + event.listen( + getattr(class_, column.name), + "set", + self._track_field_change_listener, + retval=False, + ) + + 
def _track_field_change_listener(self, *a, **kw): + try: + store = get_current_store() + except LookupError: + return + + store._track_field_change_listener(*a, **kw) def initialize(self, connection): super().initialize(connection) diff --git a/sqlalchemy_memory/base/indexes.py b/sqlalchemy_memory/base/indexes.py new file mode 100644 index 0000000..9ea1400 --- /dev/null +++ b/sqlalchemy_memory/base/indexes.py @@ -0,0 +1,253 @@ +from collections import defaultdict +from sortedcontainers import SortedDict +from typing import Any, List +from sqlalchemy.sql import operators + + +class IndexManager: + def __init__(self): + self.hash_index = HashIndex() + self.range_index = RangeIndex() + + self.table_indexes = {} + self.columns_mapping = {} + + def get_indexes(self, obj): + """ + Retrieve index from object's table as dict: indexname => list of column name + """ + tablename = obj.__tablename__ + + if tablename not in self.table_indexes: + self.table_indexes[tablename] = {} + + for index in obj.__table__.indexes: + if len(index.expressions) > 1: + # Ignoring compound indexes for now ... 
+ continue + + self.table_indexes[tablename][index.name] = [ + col.name + for col in index.expressions + ] + + return self.table_indexes[tablename] + + def _column_to_index(self, tablename, colname): + """ + Get index name from tablename & column name + """ + if tablename not in self.columns_mapping: + self.columns_mapping[tablename] = {} + + if colname not in self.columns_mapping[tablename]: + for indexname, indexcols in self.table_indexes.get(tablename, {}).items(): + if colname in indexcols: + self.columns_mapping[tablename][colname] = indexname + return indexname + + self.columns_mapping[tablename][colname] = None + + + return self.columns_mapping[tablename][colname] + + def _get_index_key(self, obj, columns): + if len(columns) == 1: + return getattr(obj, columns[0]) + return tuple(getattr(obj, c) for c in columns) + + def on_insert(self, obj): + tablename = obj.__tablename__ + indexes = self.get_indexes(obj) + + for indexname, columns in indexes.items(): + value = self._get_index_key(obj, columns) + + self.hash_index.add(tablename, indexname, value, obj) + self.range_index.add(tablename, indexname, value, obj) + + def on_delete(self, obj): + tablename = obj.__tablename__ + indexes = self.get_indexes(obj) + + for indexname, columns in indexes.items(): + value = self._get_index_key(obj, columns) + + self.hash_index.remove(tablename, indexname, value, obj) + self.range_index.remove(tablename, indexname, value, obj) + + def on_update(self, obj, updates): + tablename = obj.__tablename__ + indexes = self.get_indexes(obj) + + for indexname, columns in indexes.items(): + if columns[0] not in updates: + continue + + old_value = updates[columns[0]]["old"] + new_value = updates[columns[0]]["new"] + + self.hash_index.remove(tablename, indexname, old_value, obj) + self.range_index.remove(tablename, indexname, old_value, obj) + + self.hash_index.add(tablename, indexname, new_value, obj) + self.range_index.add(tablename, indexname, new_value, obj) + + def query(self, 
collection, tablename, colname, operator, value): + indexname = self._column_to_index(tablename, colname) + if not indexname: + return None + + # Use hash index for = / != / IN / NOT IN operators + if operator == operators.eq: + result = self.hash_index.query(tablename, indexname, value) + return list(set(result) & set(collection)) + + elif operator == operators.ne: + # All values except the given one + excluded = self.hash_index.query(tablename, indexname, value) + return list(set(collection) - set(excluded)) + + elif operator == operators.in_op: + result = [] + for v in value: + result.extend(self.hash_index.query(tablename, indexname, v)) + return list(set(result) & set(collection)) + + elif operator == operators.notin_op: + excluded = [] + for v in value: + excluded.extend(self.hash_index.query(tablename, indexname, v)) + return list(set(collection) - set(excluded)) + + # Use range index + if operator == operators.gt: + result = self.range_index.query(tablename, indexname, gt=value) + return list(set(result) & set(collection)) + + elif operator == operators.ge: + result = self.range_index.query(tablename, indexname, gte=value) + return list(set(result) & set(collection)) + + elif operator == operators.lt: + result = self.range_index.query(tablename, indexname, lt=value) + return list(set(result) & set(collection)) + + elif operator == operators.le: + result = self.range_index.query(tablename, indexname, lte=value) + return list(set(result) & set(collection)) + + elif operator == operators.between_op and isinstance(value, (tuple, list)) and len(value) == 2: + result = self.range_index.query(tablename, indexname, gte=value[0], lte=value[1]) + return list(set(result) & set(collection)) + + elif operator == operators.not_between_op and isinstance(value, (tuple, list)) and len(value) == 2: + in_range = self.range_index.query(tablename, indexname, gte=value[0], lte=value[1]) + return list(set(collection) - set(in_range)) + + def get_selectivity(self, tablename, 
colname, operator, value, total_count): + """ + Estimate selectivity: higher means worst filtering. + """ + + indexname = self._column_to_index(tablename, colname) + if not indexname: + # Column isn't indexed + return total_count + + if indexname in self.hash_index.index[tablename]: + index = self.hash_index.index[tablename][indexname] + num_keys = len(index) + + if operator == operators.eq: + return len(index.get(value, [])) + + elif operator == operators.ne: + matched = len(index.get(value, [])) + return total_count - matched + + elif operator == operators.in_op: + return sum(len(index.get(v, [])) for v in value) + + elif operator == operators.notin_op: + matched = sum(len(index.get(v, [])) for v in value) + return total_count - matched + + return total_count / num_keys + + return total_count + +class HashIndex: + """ + A hash-based index structure for fast exact-match lookups on table columns. + + Structure: + index[tablename][indexname][value] = [obj1, obj2, ...] + + Maintains insertion order of objects. + """ + + def __init__(self): + self.index = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) + + def add(self, tablename: str, indexname: str, value: Any, obj: Any): + self.index[tablename][indexname][value].append(obj) + + def remove(self, tablename: str, indexname: str, value: Any, obj: Any): + lst = self.index[tablename][indexname][value] + try: + lst.remove(obj) + if not lst: + del self.index[tablename][indexname][value] + except ValueError: + pass + + def query(self, tablename: str, indexname: str, value: Any) -> List[Any]: + return self.index[tablename][indexname].get(value, []) + + +class RangeIndex: + """ + A range-based index for fast lookups using comparison operators. + + Internally uses SortedDict to allow efficient bisecting and slicing. + Structure: + index[tablename][indexname] = SortedDict { value: [obj1, obj2, ...] 
} + """ + + def __init__(self): + self.index = defaultdict(lambda: defaultdict(SortedDict)) + + def add(self, tablename: str, indexname: str, value: Any, obj: Any): + self.index[tablename][indexname].setdefault(value, []).append(obj) + + def remove(self, tablename: str, indexname: str, value: Any, obj: Any): + col = self.index[tablename][indexname] + if value in col: + try: + col[value].remove(obj) + if not col[value]: + del col[value] + except ValueError: + pass + + def query(self, tablename: str, indexname: str, gt=None, gte=None, lt=None, lte=None) -> List[Any]: + sd = self.index[tablename][indexname] + + # Define range bounds + min_key = gte if gte is not None else gt + max_key = lte if lte is not None else lt + inclusive_min = gte is not None + inclusive_max = lte is not None + + irange = sd.irange( + minimum=min_key, + maximum=max_key, + inclusive=(inclusive_min, inclusive_max) + ) + + result = [] + for key in irange: + result.extend(sd[key]) + + return result diff --git a/sqlalchemy_memory/base/pending_changes.py b/sqlalchemy_memory/base/pending_changes.py new file mode 100644 index 0000000..d3851ea --- /dev/null +++ b/sqlalchemy_memory/base/pending_changes.py @@ -0,0 +1,58 @@ +from collections import defaultdict + +class PendingChanges: + def __init__(self): + self._to_add = defaultdict(list) + self._to_delete = defaultdict(list) + self._to_update = defaultdict(list) + + # Modifications done by the user, e.g.: instance.counter += 1 + self._modifications = defaultdict(dict) + + def clear(self): + self.rollback() + + def rollback(self): + self._to_add.clear() + self._to_delete.clear() + self._to_update.clear() + self._modifications.clear() + + def add(self, obj, **kwargs): + tablename = obj.__tablename__ + if not any(id(x) == id(obj) for x in self._to_add[tablename]): + self._to_add[tablename].append(obj) + + def delete(self, obj): + tablename = obj.__tablename__ + self._to_delete[tablename].append(obj) + + def update(self, tablename, pk_value, data): + 
self._to_update[tablename].append((pk_value, data)) + + @property + def dirty(self): + return bool(self._to_add or self._to_delete or self._to_update or self._modifications) + + def flush_to(self, target): + to_transfer = [ + "_to_add", + "_to_update", + "_to_delete", + ] + for key in to_transfer: + item = getattr(self, key) + if not item: + continue + setattr(target, key, item.copy()) + item.clear() + + def mark_field_as_dirty(self, instance, colname, oldvalue, value): + key = id(instance) + if key not in self._modifications: + self._modifications[key]["__instance"] = instance + + if colname in self._modifications[key]: + self._modifications[key][colname][1] = value + else: + self._modifications[key][colname] = [oldvalue, value] diff --git a/sqlalchemy_memory/base/query.py b/sqlalchemy_memory/base/query.py index 2b92de5..801a523 100644 --- a/sqlalchemy_memory/base/query.py +++ b/sqlalchemy_memory/base/query.py @@ -29,8 +29,8 @@ } class MemoryQuery(Query): - def __init__(self, entities, element): - super().__init__(entities, element) + def __init__(self, entities, session): + super().__init__(entities, session) self._model = entities[0] self._where_criteria = [] @@ -38,6 +38,10 @@ def __init__(self, entities, element): self._limit = None self._offset = None + @property + def store(self): + return self.session.store + @cached_property def tablename(self): if isinstance(self._model, AnnotatedTable): @@ -88,25 +92,27 @@ def _apply_boolean_condition(self, cond: BooleanClauseList, collection): return list(result) - def _apply_binary_condition(self, cond: BinaryExpression, collection): - # Extract the Python value it's being compared to - rhs = cond.right + def _resolve_rhs(self, rhs): if isinstance(rhs, BindParameter): - value = rhs.value + return rhs.value elif isinstance(rhs, True_): - value = True + return True elif isinstance(rhs, False_): - value = False + return False elif isinstance(rhs, Null): - value = None + return None elif isinstance(rhs, 
ExpressionClauseList): - value = tuple( + return tuple( clause.value if isinstance(clause, BindParameter) else clause for clause in rhs.clauses ) else: raise NotImplementedError(f"Unsupported RHS: {type(rhs)}") + def _apply_binary_condition(self, cond: BinaryExpression, collection): + # Extract the Python value it's being compared to + value = self._resolve_rhs(cond.right) + col = cond.left accessor = lambda obj, attr_name: getattr(obj, attr_name) @@ -131,6 +137,11 @@ def _apply_binary_condition(self, cond: BinaryExpression, collection): op = cond.operator + # Use index if available + index_result = self.store.query_index(collection, table_name, attr_name, op, value) + if index_result is not None: + return index_result + if op in OPERATOR_ADAPTERS: op = OPERATOR_ADAPTERS[op](value) @@ -155,13 +166,15 @@ def _apply_condition(self, cond, collection): raise NotImplementedError(f"Unsupported condition type: {type(cond)}") def _execute_query(self): - collection = self.session.store.data.get(self.tablename, []) + collection = self.store.data.get(self.tablename, []) if not collection: logger.debug(f"Table '{self.tablename}' is empty") return collection # Apply conditions - for condition in self._where_criteria: + conditions = sorted(self._where_criteria, key=self._get_condition_selectivity) + + for condition in conditions: collection = self._apply_condition(condition, collection) if len(collection) == 0: @@ -192,3 +205,26 @@ def _execute_query(self): collection = collection[:self._limit] return collection + + def _get_condition_selectivity(self, cond): + total_count = self.store.count(self.tablename) + + if not isinstance(cond, BinaryExpression): + return total_count + + col = cond.left + if isinstance(col, FunctionElement): + return total_count + + if not hasattr(col, "name"): + return total_count + + value = self._resolve_rhs(cond.right) + + return self.store.index_manager.get_selectivity( + tablename=self.tablename, + colname=col.name, + operator=cond.operator, + 
value=value, + total_count=total_count + ) diff --git a/sqlalchemy_memory/base/session.py b/sqlalchemy_memory/base/session.py index 4eb3109..215c1f0 100644 --- a/sqlalchemy_memory/base/session.py +++ b/sqlalchemy_memory/base/session.py @@ -4,12 +4,11 @@ from sqlalchemy.engine import IteratorResult from sqlalchemy.engine.cursor import SimpleResultMetaData from functools import lru_cache -from collections import defaultdict from .query import MemoryQuery +from .pending_changes import PendingChanges from ..logger import logger - class MemorySession(Session): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -17,49 +16,23 @@ def __init__(self, *args, **kwargs): self._has_pending_merge = False self.store = self.get_bind().dialect._store - # Non-committed inserts/deletes/updates - self._to_add = defaultdict(list) - self._to_delete = defaultdict(list) - self._to_update = defaultdict(list) - - self._fetched = defaultdict(dict) + # Non-flushed changes + self.pending_changes = PendingChanges() def add(self, obj, **kwargs): - tablename = obj.__tablename__ - if not any(id(x) == id(obj) for x in self._to_add[tablename]): - self._to_add[tablename].append(obj) + self.pending_changes.add(obj, **kwargs) def delete(self, obj): - tablename = obj.__tablename__ - self._to_delete[tablename].append(obj) + self.pending_changes.delete(obj) def update(self, tablename, pk_value, data): - self._to_update[tablename].append((pk_value, data)) - - def _mark_as_fetched(self, instance): - tablename = instance.__tablename__ - - pk_name = self.store._get_primary_key_name(instance) - pk_value = getattr(instance, pk_name) - - if pk_value in self._fetched[tablename]: - # Don't mark as fetched again - return - - original_values = { - col.name: getattr(instance, col.name) - for col in instance.__table__.columns - } - self._fetched[tablename][pk_value] = original_values + self.pending_changes.update(tablename, pk_value, data) def get(self, entity, id, **kwargs): """ Return an 
instance based on the given primary key identifier, or ``None`` if not found. """ - instance = self.store.get_by_primary_key(entity, id) - if instance: - self._mark_as_fetched(instance) - return instance + return self.store.get_by_primary_key(entity, id) def scalars(self, statement, **kwargs): return self.execute(statement, **kwargs).scalars() @@ -104,9 +77,6 @@ def _handle_select(self, statement: Select, **kwargs): objs = q.all() - for obj in objs: - self._mark_as_fetched(obj) - # Wrap each object in a single‑element tuple, so .scalars() yields it wrapped = ((obj,) for obj in objs) @@ -223,7 +193,6 @@ def merge(self, instance, **kwargs): existing = self.store.get_by_primary_key(instance, pk_value) if existing: - self._mark_as_fetched(existing) self._has_pending_merge = True for column in instance.__table__.columns: @@ -241,7 +210,7 @@ def merge(self, instance, **kwargs): @property def dirty(self): - return bool(self._to_add or self._to_delete or self._to_update) or self._has_pending_merge + return self.pending_changes.dirty or self._has_pending_merge def _is_clean(self): return not self.dirty @@ -250,31 +219,15 @@ def flush(self, objects=None): if not self._transaction or not self._transaction._connections: self.connection() # Ensure a real connection is created - to_transfer = [ - "_to_add", - "_to_update", - "_to_delete", - "_fetched", - ] - for key in to_transfer: - item = getattr(self, key) - if not item: - continue - setattr(self.store, key, item.copy()) - item.clear() + self.pending_changes.flush_to(self.store.pending_changes) def rollback(self, **kwargs): logger.debug("Rolling back ...") - self.store._fetched = self._fetched self.store.rollback() self._has_pending_merge = False - - self._to_add.clear() - self._to_delete.clear() - self._to_update.clear() - self._fetched.clear() + self.pending_changes.rollback() def commit(self): diff --git a/sqlalchemy_memory/base/store.py b/sqlalchemy_memory/base/store.py index 728e7c5..bb68af2 100644 --- 
a/sqlalchemy_memory/base/store.py +++ b/sqlalchemy_memory/base/store.py @@ -1,9 +1,12 @@ from collections import defaultdict from sqlalchemy import func from sqlalchemy.sql.elements import TextClause +from sqlalchemy.orm.attributes import NEVER_SET, NO_VALUE, LoaderCallableStatus from datetime import datetime from ..logger import logger +from .pending_changes import PendingChanges +from .indexes import IndexManager class InMemoryStore: def __init__(self): @@ -13,23 +16,23 @@ def _reset(self): self.data = defaultdict(list) self.data_by_pk = defaultdict(dict) - # Non-committed inserts/deletes/updates - self._to_add = {} - self._to_delete = {} - self._to_update = {} + self.index_manager = IndexManager() - self._fetched = {} + # Non-committed changes + self.pending_changes = PendingChanges() # Auto increment counter per table self._pk_counter = defaultdict(int) @property def dirty(self): - return bool(self._to_add or self._to_delete or self._to_update) + return self.pending_changes.dirty def commit(self): + self.update_modified_items_indexes() + # apply deletes - for tablename, objs in self._to_delete.items(): + for tablename, objs in self.pending_changes._to_delete.items(): if not objs: continue @@ -49,8 +52,12 @@ def commit(self): for pk_value in pk_values: del self.data_by_pk[tablename][pk_value] + # Update indexes + for obj in objs: + self.index_manager.on_delete(obj) + # apply adds - for tablename, objs in self._to_add.items(): + for tablename, objs in self.pending_changes._to_add.items(): if tablename not in self.data: self.data[tablename] = [] @@ -65,37 +72,35 @@ def commit(self): self.data[tablename].append(obj) self.data_by_pk[tablename][pk_value] = obj + self.index_manager.on_insert(obj) # apply updates - for tablename, updates in self._to_update.items(): + for tablename, updates in self.pending_changes._to_update.items(): for pk_value, data in updates: if pk_value not in self.data_by_pk[tablename].keys(): raise Exception(f"Could not find item with PK value 
{pk_value} in table '{tablename}'") logger.debug(f"Updating table '{tablename}' where PK value={pk_value}: {data}") item = self.data_by_pk[tablename][pk_value] + + values = {} for k, v in data.items(): + values[k] = dict(old=getattr(item, k), new=v) setattr(item, k, v) - self._to_add.clear() - self._to_delete.clear() - self._to_update.clear() - self._fetched.clear() + # Update indexes + self.index_manager.on_update(item, values) - def rollback(self): - self._to_add.clear() - self._to_delete.clear() - self._to_update.clear() + self.pending_changes.clear() + def rollback(self): # Revert attributes changes - for tablename, fetched_objs in self._fetched.items(): - for pk_value, original_values in fetched_objs.items(): - obj = self.data_by_pk[tablename].get(pk_value) + for updates in self.pending_changes._modifications.values(): + instance = updates.pop("__instance") + for colname, (old_value, new_value) in updates.items(): + setattr(instance, colname, old_value) - for field, value in original_values.items(): - setattr(obj, field, value) - - self._fetched.clear() + self.pending_changes.rollback() def get_by_primary_key(self, entity, pk_value): tablename = entity.__tablename__ @@ -170,3 +175,36 @@ def _apply_column_defaults(self, obj): else: raise Exception(f"Unhandled server_default type: {type(column.server_default)}") + + def query_index(self, collection, table_name, attr_name, op, value): + result = self.index_manager.query(collection, table_name, attr_name, op, value) + if result is not None: + logger.debug(f"Reduced '{table_name}' dataset from {len(collection)} items to {len(result)} by using index on '{attr_name}'") + return result + + def count(self, tablename): + return len(self.data[tablename]) + + def update_modified_items_indexes(self): + # update indexes of modified objects + for updates in self.pending_changes._modifications.values(): + instance = updates.pop("__instance") + + values = { + colname: dict(old=old_value, new=new_value) + for colname,
(old_value, new_value) in updates.items() + if old_value != new_value + } + if not values: + continue + + # Update indexes + self.index_manager.on_update(instance, values) + + def _track_field_change_listener(self, target, value, oldvalue, initiator): + if oldvalue in (NO_VALUE, NEVER_SET, LoaderCallableStatus.NO_VALUE): + return + if oldvalue == value: + return + + self.pending_changes.mark_field_as_dirty(target, initiator.key, oldvalue, value) diff --git a/tests/models.py b/tests/models.py index 8a0a225..94c7917 100644 --- a/tests/models.py +++ b/tests/models.py @@ -16,9 +16,9 @@ def __repr__(self): class Product(Base): __tablename__ = "products" id: Mapped[int] = mapped_column(primary_key=True) - active: Mapped[bool] = mapped_column(default=True, index=True) + active: Mapped[bool] = mapped_column(default=True) name: Mapped[str] = mapped_column(nullable=False) - category: Mapped[str] = mapped_column(index=True, server_default=text("unknown")) + category: Mapped[str] = mapped_column(server_default=text("unknown")) data: Mapped[dict] = mapped_column(JSON) created_at: Mapped[datetime] = mapped_column( @@ -28,3 +28,14 @@ class Product(Base): def __repr__(self): return f"Product(id={self.id} name={self.name})" + +class ProductWithIndex(Base): + __tablename__ = "products_with_index" + id: Mapped[int] = mapped_column(primary_key=True) + active: Mapped[bool] = mapped_column(default=True, index=True) + name: Mapped[str] = mapped_column(nullable=False) + category: Mapped[str] = mapped_column(index=True, nullable=False) + price: Mapped[float] = mapped_column(default=True, index=True) + + def __repr__(self): + return f"ProductWithIndex(id={self.id} name={self.name})" diff --git a/tests/test_crud.py b/tests/test_crud.py index dc276ef..9a37e35 100644 --- a/tests/test_crud.py +++ b/tests/test_crud.py @@ -31,6 +31,7 @@ def test_insert(self, SessionFactory): assert items[2].id == 3 assert items[2].name == "fba" + def test_update(self, SessionFactory): with SessionFactory() as 
session: with session.begin(): diff --git a/tests/test_indexes.py b/tests/test_indexes.py new file mode 100644 index 0000000..17072d1 --- /dev/null +++ b/tests/test_indexes.py @@ -0,0 +1,207 @@ +import pytest +from unittest.mock import MagicMock +from sqlalchemy.sql import operators + +from sqlalchemy_memory.base.indexes import HashIndex, RangeIndex, IndexManager + +from models import ProductWithIndex + +class TestIndexes: + def test_hash_index(self): + index = HashIndex() + mock3 = MagicMock(id=3) + + index.add("table1", "activeIndex", True, MagicMock(id=1)) + index.add("table1", "activeIndex", False, MagicMock(id=2)) + index.add("table1", "activeIndex", False, mock3) + index.add("table1", "activeIndex", True, MagicMock(id=4)) + + index.add("table2", "activeIndex", True, MagicMock(id=100)) + + results = index.query("table1", "activeIndex", True) + assert {r.id for r in results} == {1, 4} + + results = index.query("table1", "activeIndex", False) + assert {r.id for r in results} == {2, 3} + + index.remove("table1", "activeIndex", False, mock3) + + results = index.query("table1", "activeIndex", False) + assert {r.id for r in results} == {2} + + def test_hash_compound_index(self): + index = HashIndex() + mock3 = MagicMock(id=3) + + index.add("table1", "active_category", (True, "A"), MagicMock(id=1)) + index.add("table1", "active_category", (True, "B"), MagicMock(id=2)) + index.add("table1", "active_category", (False, "A"), mock3) + index.add("table1", "active_category", (False, "B"), MagicMock(id=4)) + + results = index.query("table1", "active_category", (False, "A")) + assert {r.id for r in results} == {3} + + index.remove("table1", "active_category", (False, "A"), mock3) + + results = index.query("table1", "active_category", (False, "A")) + assert {r.id for r in results} == set() + + @pytest.mark.parametrize("query_kwargs,expected_ids", [ + ({"gt": 10}, {2, 3}), + ({"gte": 20}, {2, 3}), + ({"lt": 20}, {1}), + ({"lte": 20}, {1, 3}), + ({"gte": 15, "lte": 30}, {2, 
3}), + ({"gt": 5, "lt": 25}, {1, 3}), + ({"gt": 10, "lte": 30}, {2, 3}), + ({"gte": 10, "lt": 30}, {1, 3}), + ({"gt": 30}, set()), + ({"lt": 10}, set()), + ({"lte": 10, "gt": 30}, set()), + ]) + def test_range_index(self, query_kwargs, expected_ids): + index = RangeIndex() + + objs = [ + MagicMock(id=1, price=10), + MagicMock(id=2, price=30), + MagicMock(id=3, price=20), + ] + + for obj in objs: + index.add("products", "price_index", obj.price, obj) + + results = index.query("products", "price_index", **query_kwargs) + assert {r.id for r in results} == expected_ids + + @pytest.mark.parametrize("query_kwargs,expected_ids", [ + # All ES assets + ({"gte": ("ES", -float("inf")), "lte": ("ES", float("inf"))}, {1, 2}), + + # ES assets with price > 10 + ({"gt": ("ES", 10), "lte": ("ES", float("inf"))}, {2}), + + # NQ assets with price <= 20 + ({"gte": ("NQ", -float("inf")), "lte": ("NQ", 20)}, {3}), + + # All between ("ES", 10) and ("NQ", 30) + ({"gte": ("ES", 10), "lte": ("NQ", 30)}, {1, 2, 3}), + + # Nothing between ("ES", 40) and ("ES", 50) + ({"gte": ("ES", 40), "lte": ("ES", 50)}, set()), + + # Full range + ({"gte": ("ES", -float("inf")), "lte": ("NQ", float("inf"))}, {1, 2, 3, 4}), + ]) + def test_compound_range_index(self, query_kwargs, expected_ids): + index = RangeIndex() + + objs = [ + MagicMock(id=1, asset="ES", price=10), + MagicMock(id=2, asset="ES", price=30), + MagicMock(id=3, asset="NQ", price=20), + MagicMock(id=4, asset="NQ", price=40), + ] + + for obj in objs: + key = (obj.asset, obj.price) + index.add("products", "asset_price_index", key, obj) + + results = index.query("products", "asset_price_index", **query_kwargs) + + assert {r.id for r in results} == expected_ids + + @pytest.mark.parametrize("operator,value,expected", [ + (operators.eq, "A", 3), + (operators.eq, "Z", 0), # "Z" not present + (operators.ne, "A", 7), + (operators.ne, "Z", 10), # "Z" not present + (operators.in_op, ["A", "B"], 6), + (operators.notin_op, ["A", "B"], 4), + ("fallback", 
None, 10 / 3), # 10/3 + ]) + def test_get_selectivity(self, operator, value, expected): + tablename = "products" + indexname = "ix_category" + colname = "category" + + index_manager = IndexManager() + index_manager.table_indexes = { + tablename: { + indexname: [colname] + } + } + + assert index_manager._column_to_index("nothing", "nothing") is None + assert index_manager._column_to_index(tablename, "nothing") is None + assert index_manager._column_to_index(tablename, colname) == indexname + + total_count = 10 + for category, count in zip(["A", "B", "C"], [3, 3, 4]): + for _ in range(count): + index_manager.hash_index.add(tablename, indexname, category, MagicMock()) + + result = index_manager.get_selectivity(tablename, colname, operator, value, total_count) + assert result == pytest.approx(expected) + + def test_synchronized_indexes(self, SessionFactory): + tablename = ProductWithIndex.__tablename__ + + with SessionFactory() as session: + session.add_all([ + ProductWithIndex( + id=1, + active=True, + name="Hello", + category="A", + price=100, + ), + ProductWithIndex( + id=2, + active=True, + name="World", + category="B", + price=200, + ), + ]) + session.commit() + + store = session.store + collection = store.data[tablename] + + assert len(store.query_index(collection, tablename, "active", operators.eq, True)) == 2 + assert len(store.query_index(collection, tablename, "active", operators.ne, True)) == 0 + assert len(store.query_index(collection, tablename, "active", operators.eq, False)) == 0 + assert len(store.query_index(collection, tablename, "active", operators.ne, False)) == 2 + + assert len(store.query_index(collection, tablename, "category", operators.eq, "A")) == 1 + assert len(store.query_index(collection, tablename, "category", operators.eq, "B")) == 1 + assert len(store.query_index(collection, tablename, "category", operators.eq, "Z")) == 0 + + # Assert nothing was changed on rollback + item = session.get(ProductWithIndex, 2) + item.category = "Z" + 
session.rollback() + assert len(store.query_index(collection, tablename, "category", operators.eq, "A")) == 1 + assert len(store.query_index(collection, tablename, "category", operators.eq, "B")) == 1 + assert len(store.query_index(collection, tablename, "category", operators.eq, "Z")) == 0 + + # Assert index was synchronized after update + item = session.get(ProductWithIndex, 2) + item.category = "Z" + session.commit() + + assert len(store.query_index(collection, tablename, "category", operators.eq, "A")) == 1 + assert len(store.query_index(collection, tablename, "category", operators.eq, "B")) == 0 + assert len(store.query_index(collection, tablename, "category", operators.eq, "Z")) == 1 + + # Assert nothing was changed on rollback + session.delete(collection[0]) + session.rollback() + assert len(store.query_index(collection, tablename, "active", operators.eq, True)) == 2 + + # Assert index was synchronized after deletion + session.delete(collection[0]) + session.commit() + assert len(store.query_index(collection, tablename, "active", operators.eq, True)) == 1 + assert len(store.query_index(collection, tablename, "active", operators.eq, False)) == 0