Good morning,
we're trying to update ExaFS to version 1.1.7. We tried to run the migrations according to https://github.com/CESNET/exafs/blob/main/docs/DB_MIGRATIONS.md, but flask db upgrade failed. The generated migration looked like this:
"""init migration
Revision ID: 404c85389d1c
Revises:
Create Date: 2025-11-03 14:50:45.525311
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '404c85389d1c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('whitelist',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('ip', sa.String(length=255), nullable=True),
        sa.Column('mask', sa.Integer(), nullable=True),
        sa.Column('comment', sa.Text(), nullable=True),
        sa.Column('expires', sa.DateTime(), nullable=True),
        sa.Column('created', sa.DateTime(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('org_id', sa.Integer(), nullable=False),
        sa.Column('rstate_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['org_id'], ['organization.id'], ),
        sa.ForeignKeyConstraint(['rstate_id'], ['rstate.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('rule_whitelist_cache',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('rid', sa.Integer(), nullable=True),
        sa.Column('rtype', sa.Integer(), nullable=True),
        sa.Column('rorigin', sa.Integer(), nullable=True),
        sa.Column('whitelist_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['whitelist_id'], ['whitelist.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('sessions', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('session_id'))
    op.drop_table('sessions')
    with op.batch_alter_table('RTBH', schema=None) as batch_op:
        batch_op.add_column(sa.Column('org_id', sa.Integer(), nullable=False))
        batch_op.create_foreign_key(None, 'organization', ['org_id'], ['id'])
    with op.batch_alter_table('api_key', schema=None) as batch_op:
        batch_op.add_column(sa.Column('org_id', sa.Integer(), nullable=False))
        batch_op.create_foreign_key(None, 'organization', ['org_id'], ['id'])
    with op.batch_alter_table('flowspec4', schema=None) as batch_op:
        batch_op.add_column(sa.Column('org_id', sa.Integer(), nullable=False))
        batch_op.create_foreign_key(None, 'organization', ['org_id'], ['id'])
    with op.batch_alter_table('flowspec6', schema=None) as batch_op:
        batch_op.add_column(sa.Column('org_id', sa.Integer(), nullable=False))
        batch_op.create_foreign_key(None, 'organization', ['org_id'], ['id'])
    with op.batch_alter_table('machine_api_key', schema=None) as batch_op:
        batch_op.add_column(sa.Column('org_id', sa.Integer(), nullable=False))
        batch_op.create_foreign_key(None, 'organization', ['org_id'], ['id'])
    with op.batch_alter_table('organization', schema=None) as batch_op:
        batch_op.add_column(sa.Column('limit_flowspec4', sa.Integer(), nullable=True))
        batch_op.add_column(sa.Column('limit_flowspec6', sa.Integer(), nullable=True))
        batch_op.add_column(sa.Column('limit_rtbh', sa.Integer(), nullable=True))
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('organization', schema=None) as batch_op:
        batch_op.drop_column('limit_rtbh')
        batch_op.drop_column('limit_flowspec6')
        batch_op.drop_column('limit_flowspec4')
    with op.batch_alter_table('machine_api_key', schema=None) as batch_op:
        batch_op.drop_constraint(None, type_='foreignkey')
        batch_op.drop_column('org_id')
    with op.batch_alter_table('flowspec6', schema=None) as batch_op:
        batch_op.drop_constraint(None, type_='foreignkey')
        batch_op.drop_column('org_id')
    with op.batch_alter_table('flowspec4', schema=None) as batch_op:
        batch_op.drop_constraint(None, type_='foreignkey')
        batch_op.drop_column('org_id')
    with op.batch_alter_table('api_key', schema=None) as batch_op:
        batch_op.drop_constraint(None, type_='foreignkey')
        batch_op.drop_column('org_id')
    with op.batch_alter_table('RTBH', schema=None) as batch_op:
        batch_op.drop_constraint(None, type_='foreignkey')
        batch_op.drop_column('org_id')
    op.create_table('sessions',
        sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
        sa.Column('session_id', mysql.VARCHAR(length=255), nullable=True),
        sa.Column('data', sa.BLOB(), nullable=True),
        sa.Column('expiry', mysql.DATETIME(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        mysql_collate='utf8mb4_general_ci',
        mysql_default_charset='utf8mb4',
        mysql_engine='InnoDB'
    )
    with op.batch_alter_table('sessions', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('session_id'), ['session_id'], unique=True)
    op.drop_table('rule_whitelist_cache')
    op.drop_table('whitelist')
    # ### end Alembic commands ###

The problematic parts are:
with op.batch_alter_table('RTBH', schema=None) as batch_op:
    batch_op.add_column(sa.Column('org_id', sa.Integer(), nullable=False))
    batch_op.create_foreign_key(None, 'organization', ['org_id'], ['id'])

This correctly adds the column to the RTBH table, but create_foreign_key then fails, because after the ALTER all existing records in RTBH have org_id set to 0 and no organization with id=0 exists. The same failure happens for every table altered with batch_op. It also doesn't make sense, because you add two organizations yourselves and the auto-increment starts at 1.
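Just to illustrate one alternative (a sketch only, not taken from the ExaFS code base; the backfill value 1 and the constraint name fk_rtbh_org_id are placeholders), such a step could add the column as nullable first, backfill it, and only then enforce NOT NULL and the foreign key:

with op.batch_alter_table('RTBH', schema=None) as batch_op:
    # 1) add the column without NOT NULL so existing rows stay valid
    batch_op.add_column(sa.Column('org_id', sa.Integer(), nullable=True))

# 2) backfill existing rows with an organization id that actually exists
#    on the target database (the value 1 here is just a placeholder)
op.execute("UPDATE RTBH SET org_id = 1 WHERE org_id IS NULL")

with op.batch_alter_table('RTBH', schema=None) as batch_op:
    # 3) only now enforce NOT NULL and create the foreign key
    batch_op.alter_column('org_id', existing_type=sa.Integer(), nullable=False)
    batch_op.create_foreign_key('fk_rtbh_org_id', 'organization', ['org_id'], ['id'])

For reference, this is the organization table on our instance: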
MariaDB [exafs]> describe organization;
+--------+--------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+--------+--------------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| name | varchar(150) | YES | UNI | NULL | |
| arange | text | YES | | NULL | |
+--------+--------------+------+-----+---------+----------------+

We have inserted our own organization with id=3 and we are not interested in using a different organization, so the fix is fairly straightforward (well, one way to do it): we set a server default of 3 for the org_id column in every altered table:
batch_op.add_column(sa.Column('org_id', sa.Integer(), nullable=False, server_default='3'))

All the records were correctly mapped to our organization (on the testing environment) and flask db upgrade passed.
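A side note on this workaround (again just a sketch of a possible follow-up, not anything from the ExaFS docs): with server_default='3' in place, any future insert that omits org_id silently falls back to organization 3, so the temporary default can be dropped again once the upgrade has backfilled the data:

with op.batch_alter_table('RTBH', schema=None) as batch_op:
    # drop the temporary server default again; the existing_* arguments are
    # needed because MySQL/MariaDB rebuilds the column definition on ALTER
    batch_op.alter_column(
        'org_id',
        existing_type=sa.Integer(),
        existing_nullable=False,
        existing_server_default=sa.text("'3'"),
        server_default=None,
    )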
Anyway, is there a chance you could make the migrations consistent? Because stating that "Migrations can be inconsistent." and removing them from the repository does not seem like the right way to manage software. Migrations should be consistent and predictable, so that I can be 100% sure the upgrade will not fail and the resulting database schema is correct. How are migrations that are autogenerated for each patch predictable? How can I be sure the schema is correct? The kind of error I've described above is preventable if the original migration is shared and versioned.
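One way we can at least verify the result on our side, given that the migrations are autogenerated per installation, is to diff the live schema against the models with Alembic's comparison API. A minimal sketch, assuming db is the Flask-SQLAlchemy instance used by ExaFS and that it runs inside an application context:

from alembic.autogenerate import compare_metadata
from alembic.migration import MigrationContext

def schema_drift(db):
    # Compare the live database schema with the SQLAlchemy model metadata.
    # An empty list means Alembic sees no difference; anything else is drift.
    with db.engine.connect() as conn:
        ctx = MigrationContext.configure(conn)
        return compare_metadata(ctx, db.metadata)

But having the original migrations shared and versioned in the repository would make this kind of manual checking unnecessary.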