diff --git a/Access token.txt b/Access token.txt
index 33a461a47ffadf33353936235e7222eda6654139..abad50bb1b8016dbe511ed49702f9d91b68871d1 100644
--- a/Access token.txt	
+++ b/Access token.txt	
@@ -1,4 +1,6 @@
-Access token
+Access token for cmt120_3
+SkDKs2q-zBLHzn_bj-fN
+
 6vJqLP1vu_gQW1YyCAFp
 
 old one: TXoj4fY2FNam2Liresky
\ No newline at end of file
diff --git a/C2051326_report.pdf b/C2051326_report.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..cc4f46eb24da6a4b35bd1ddfef4adc6ac88ef43e
Binary files /dev/null and b/C2051326_report.pdf differ
diff --git a/Website Report CMT120.docx b/Website Report CMT120.docx
deleted file mode 100644
index 8feeaf61a00fdedabc4577a30c0f7d8a8288b863..0000000000000000000000000000000000000000
Binary files a/Website Report CMT120.docx and /dev/null differ
diff --git a/Website Report CMT120.pdf b/Website Report CMT120.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..f06b6e5f4d96fbe4a5dcc050889868d3f3f591e9
Binary files /dev/null and b/Website Report CMT120.pdf differ
diff --git a/__pycache__/app.cpython-311.pyc b/__pycache__/app.cpython-311.pyc
index 1fe162114633c19e3c472bfd860eca557f8c46ec..d4f34ec8cd428cccc439b9fbf23b44f4ed539227 100644
Binary files a/__pycache__/app.cpython-311.pyc and b/__pycache__/app.cpython-311.pyc differ
diff --git a/__pycache__/wsgi.cpython-311.pyc b/__pycache__/wsgi.cpython-311.pyc
index c235da66be9a1612cccd869f36b6db56c868b02b..aa9a2ba1ddf386b904f2608e6cdd41d4650a72c8 100644
Binary files a/__pycache__/wsgi.cpython-311.pyc and b/__pycache__/wsgi.cpython-311.pyc differ
diff --git a/app.py b/app.py
index ef70f3c81fdfa586a28864dc8a16ff74a66bd6d1..d0a5a1ad0166a949335494d9a8c5590c70a9cecb 100644
--- a/app.py
+++ b/app.py
@@ -2,11 +2,15 @@ import os
 import secrets
 from flask import Flask, render_template, request, redirect, url_for, send_from_directory, abort
 from flask_sqlalchemy import SQLAlchemy
+from flask_wtf import FlaskForm
+from wtforms import StringField, SubmitField, TextAreaField
+from wtforms.validators import DataRequired
 
-app = Flask('cmt120_2', static_folder='static')
+app = Flask(__name__, static_folder='static')
 app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
 app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
-app.config['SECRET_KEY'] = secrets.token_hex(16)
+app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', secrets.token_hex(16))  # stable key in production, random fallback for dev
+app.config['WTF_CSRF_ENABLED'] = True
 
 db = SQLAlchemy(app)
 
@@ -15,6 +19,12 @@ class Project(db.Model):
     title = db.Column(db.String(100), nullable=False)
     description = db.Column(db.Text, nullable=False)
 
+class AddProjectForm(FlaskForm):
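+    # FlaskForm wires up CSRF validation; DataRequired rejects empty submissions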
+    title = StringField('Title', validators=[DataRequired()])
+    description = TextAreaField('Description', validators=[DataRequired()])
+    submit = SubmitField('Submit')
+
 @app.route('/')
 def home():
     try:
@@ -24,46 +34,52 @@
         print(f"Error fetching projects: {str(e)}")
         return render_template('error.html')
 
+# Route for the 'about' page
 @app.route('/about')
 def about():
     return render_template('about.html')
 
-# New route for the Experience page
+# Route for the 'experience' page
 @app.route('/experience')
 def experience():
     # Add logic to fetch data related to the Experience section if needed
     return render_template('experience.html')
 
-# New route for the Portfolio page
+# Route for the 'portfolio' page
 @app.route('/portfolio')
 def portfolio():
     # Add logic to fetch data related to the Portfolio section if needed
     return render_template('portfolio.html')
 
-# New route for the contact page
+# Route for the 'contact' page
 @app.route('/contact')
 def contact():
     return render_template('contact.html')
 
-# New route for adding a project
+# Updated route for adding a project with Flask-WTF form
 @app.route('/add_project', methods=['GET', 'POST'])
 def add_project():
-    if request.method == 'POST':
-        title = request.form['title']
-        description = request.form['description']
-        new_project = Project(title=title, description=description)
+    form = AddProjectForm()
+
+    if form.validate_on_submit():
+        # Print or log the form data to check if it's received
+        print(f"Received form data - Title: {form.title.data}, Description: {form.description.data}")
+
+        new_project = Project(title=form.title.data, description=form.description.data)
         db.session.add(new_project)
         db.session.commit()
         return redirect(url_for('home'))
-    return render_template('add_project.html')
 
-# New route for serving the my-cv.docx file
+    return render_template('add_project.html', form=form)
+
+# Updated route for serving the 'my-cv.docx' file
 @app.route('/download_cv')
 def download_cv():
     file_path = 'static/my-cv.docx'
     print(f"Attempting to serve file: {file_path}")
-    return send_from_directory('static', 'my-cv.docx', as_attachment=True, mimetype='application/docx')
+    return send_from_directory('static', 'my-cv.docx', as_attachment=True, mimetype='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
 
+# Route for serving assessment files
 @app.route('/download_assessment/<filename>')
 def download_assessment(filename):
     try:
@@ -77,9 +93,8 @@ def download_assessment(filename):
         print(f"Error serving assessment file: {str(e)}")
         app.logger.exception(f"Error serving assessment file: {str(e)}")
         abort(500) 
-        
-if __name__ == '__main__':
-    with app.app_context():
-        db.create_all()
 
-    app.run(debug=True, host='0.0.0.0')
\ No newline at end of file
+if __name__ == '__main__':
+    with app.app_context():
+        db.create_all()  # ensure tables exist on a fresh database
+    app.run(debug=True, port=int(os.environ.get('PORT', 5000)))
\ No newline at end of file
diff --git a/gunicorn_config.py b/gunicorn_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f9741dbdca6bef5c1289bcf6f4b1846bd104b2b
--- /dev/null
+++ b/gunicorn_config.py
@@ -0,0 +1,5 @@
+# Despite its name, this file serves the app with waitress, not gunicorn; run it with: python gunicorn_config.py
+from waitress import serve
+from app import app  # the Flask app instance defined in app.py
+
+serve(app, host='0.0.0.0', port=8080)
diff --git a/instance/site.db b/instance/site.db
index d693d9df08ed4a4db809c793dd72904aa830d22d..771a49d06db20ccee40b021d06b261f00af8f7e5 100644
Binary files a/instance/site.db and b/instance/site.db differ
diff --git a/templates/about.html b/templates/about.html
index 943e9d50bceaaec06820286b03159eb237a92aba..5bc03cff1b781e8447564b20adde9e2af41e11fa 100644
--- a/templates/about.html
+++ b/templates/about.html
@@ -15,6 +15,7 @@
             <li><a href="{{ url_for('about') }}">About Me</a></li>
             <li><a href="{{ url_for('experience') }}">Experience</a></li>
             <li><a href="{{ url_for('portfolio') }}">Portfolio</a></li>
+            <li><a href="{{ url_for('add_project') }}">Add Project</a></li>
             <li><a href="{{ url_for('contact') }}">Contact</a></li>
         </ul>
     </nav>
diff --git a/templates/add_project.html b/templates/add_project.html
new file mode 100644
index 0000000000000000000000000000000000000000..6d9f8b3375181a385b3591e312818712eb925b59
--- /dev/null
+++ b/templates/add_project.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Add Project - Personal Portfolio Website</title>
+    <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.1.5/jszip.min.js"></script>
+    <script src="https://kit.fontawesome.com/221d185582.js" crossorigin="anonymous"></script>
+</head>
+<body>
+    <h2>Add Project</h2>
+    <form method="POST" action="{{ url_for('add_project') }}">
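+        <!-- form.csrf_token renders a hidden input; POSTs without a valid token are rejected -->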
+        {{ form.csrf_token }}
+        <div class="form-group">
+            {{ form.title.label }}
+            {{ form.title(class="form-control", placeholder="Enter project title") }}
+        </div>
+        <div class="form-group">
+            {{ form.description.label }}
+            {{ form.description(class="form-control", placeholder="Enter project description") }}
+        </div>
+        <div class="form-group">
+            {{ form.submit(class="btn btn-primary") }}
+        </div>
+    </form>
+</body>
+</html>
diff --git a/templates/contact.html b/templates/contact.html
index 99fe94fe67cdd567126450fe70eaf02b6b2989fc..006a93c220d3021e58664877558216ef6f329a7a 100644
--- a/templates/contact.html
+++ b/templates/contact.html
@@ -69,6 +69,7 @@
             <li><a href="{{ url_for('about') }}">About Me</a></li>
             <li><a href="{{ url_for('experience') }}">Experience</a></li>
             <li><a href="{{ url_for('portfolio') }}">Portfolio</a></li>
+            <li><a href="{{ url_for('add_project') }}">Add Project</a></li>
             <li><a href="{{ url_for('contact') }}">Contact</a></li>
         </ul>
     </nav>
diff --git a/templates/experience.html b/templates/experience.html
index aa057f9ac42d5c5c6ec5d12bc3150fbc26d6edef..1458692882f18941aa47857a03d5c971fbf39a16 100644
--- a/templates/experience.html
+++ b/templates/experience.html
@@ -23,6 +23,7 @@
             <li><a href="{{ url_for('about') }}">About Me</a></li>
             <li><a href="{{ url_for('experience') }}">Experience</a></li>
             <li><a href="{{ url_for('portfolio') }}">Portfolio</a></li>
+            <li><a href="{{ url_for('add_project') }}">Add Project</a></li>
             <li><a href="{{ url_for('contact') }}">Contact</a></li>
         </ul>
     </nav>
diff --git a/templates/index.html b/templates/index.html
index 9317c5ec212f25f6515a6b57f0c816f98cdabf82..37747fb1602f1753a95a023c52c120c013b80079 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -32,6 +32,7 @@
                     <li><a href="{{ url_for('about') }}">About Me</a></li>
                     <li><a href="{{ url_for('experience') }}">Experience</a></li>
                     <li><a href="{{ url_for('portfolio') }}">Portfolio</a></li>
+                    <li><a href="{{ url_for('add_project') }}">Add Project</a></li>
                     <li><a href="{{ url_for('contact') }}">Contact</a></li>
                 </ul>
             </nav>            
diff --git a/templates/portfolio.html b/templates/portfolio.html
index 6ba68837d10e9074a1bbb9b05a34c4b8177243ae..3a394482bba57f7b249ce2876e69696a1c412d04 100644
--- a/templates/portfolio.html
+++ b/templates/portfolio.html
@@ -28,6 +28,7 @@
             <li><a href="{{ url_for('about') }}">About Me</a></li>
             <li><a href="{{ url_for('experience') }}">Experience</a></li>
             <li><a href="{{ url_for('portfolio') }}">Portfolio</a></li>
+            <li><a href="{{ url_for('add_project') }}">Add Project</a></li>
             <li><a href="{{ url_for('contact') }}">Contact</a></li>
         </ul>
     </nav>
diff --git a/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/INSTALLER b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/LICENSE b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..2448fd2665df720101cf6662b17bfdfbda3e7cc3
--- /dev/null
+++ b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Miguel Grinberg
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/METADATA b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..0590a4cf5709e462f24060029379c488f254ab53
--- /dev/null
+++ b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/METADATA
@@ -0,0 +1,107 @@
+Metadata-Version: 2.1
+Name: Flask-Migrate
+Version: 4.0.5
+Summary: SQLAlchemy database migrations for Flask applications using Alembic.
+Home-page: https://github.com/miguelgrinberg/flask-migrate
+Author: Miguel Grinberg
+Author-email: miguel.grinberg@gmail.com
+License: MIT
+Project-URL: Bug Tracker, https://github.com/miguelgrinberg/flask-migrate/issues
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: Flask >=0.9
+Requires-Dist: Flask-SQLAlchemy >=1.0
+Requires-Dist: alembic >=1.9.0
+
+Flask-Migrate
+=============
+
+[![Build status](https://github.com/miguelgrinberg/flask-migrate/workflows/build/badge.svg)](https://github.com/miguelgrinberg/flask-migrate/actions)
+
+Flask-Migrate is an extension that handles SQLAlchemy database migrations for Flask applications using Alembic. The database operations are provided as command-line arguments under the `flask db` command.
+
+Installation
+------------
+
+Install Flask-Migrate with `pip`:
+
+    pip install Flask-Migrate
+
+Example
+-------
+
+This is an example application that handles database migrations through Flask-Migrate:
+
+```python
+from flask import Flask
+from flask_sqlalchemy import SQLAlchemy
+from flask_migrate import Migrate
+
+app = Flask(__name__)
+app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
+
+db = SQLAlchemy(app)
+migrate = Migrate(app, db)
+
+class User(db.Model):
+    id = db.Column(db.Integer, primary_key=True)
+    name = db.Column(db.String(128))
+```
+
+With the above application you can create the database or enable migrations if the database already exists with the following command:
+
+    $ flask db init
+
+Note that the `FLASK_APP` environment variable must be set according to the Flask documentation for this command to work. This will add a `migrations` folder to your application. The contents of this folder need to be added to version control along with your other source files. 
+
+You can then generate an initial migration:
+
+    $ flask db migrate
+    
+The migration script needs to be reviewed and edited, as Alembic currently does not detect every change you make to your models. In particular, Alembic is currently unable to detect indexes. Once finalized, the migration script also needs to be added to version control.
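+
+For illustration, a generated script has roughly this shape (the revision
+identifiers below are placeholders; Alembic assigns real ones):
+
+```python
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic (placeholder values)
+revision = 'abc123def456'
+down_revision = None
+
+def upgrade():
+    op.create_table('user',
+        sa.Column('id', sa.Integer(), nullable=False),
+        sa.Column('name', sa.String(length=128), nullable=True),
+        sa.PrimaryKeyConstraint('id'))
+
+def downgrade():
+    op.drop_table('user')
+```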
+
+Then you can apply the migration to the database:
+
+    $ flask db upgrade
+    
+Then each time the database models change repeat the `migrate` and `upgrade` commands.
+
+To sync the database in another system just refresh the `migrations` folder from source control and run the `upgrade` command.
+
+To see all the commands that are available run this command:
+
+    $ flask db --help
+
+Resources
+---------
+
+- [Documentation](http://flask-migrate.readthedocs.io/en/latest/)
+- [pypi](https://pypi.python.org/pypi/Flask-Migrate) 
+- [Change Log](https://github.com/miguelgrinberg/Flask-Migrate/blob/master/CHANGES.md)
diff --git a/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/RECORD b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..dc8366d5d995aa50b259e46868e327ebdcd82cb8
--- /dev/null
+++ b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/RECORD
@@ -0,0 +1,31 @@
+Flask_Migrate-4.0.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Flask_Migrate-4.0.5.dist-info/LICENSE,sha256=kfkXGlJQvKy3Y__6tAJ8ynIp1HQfeROXhL8jZU1d-DI,1082
+Flask_Migrate-4.0.5.dist-info/METADATA,sha256=d-EcnhZa_vyVAph2u84OpGIteJaBmqLQxO5Rf6wUI7Y,3095
+Flask_Migrate-4.0.5.dist-info/RECORD,,
+Flask_Migrate-4.0.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Flask_Migrate-4.0.5.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+Flask_Migrate-4.0.5.dist-info/top_level.txt,sha256=jLoPgiMG6oR4ugNteXn3IHskVVIyIXVStZOVq-AWLdU,14
+flask_migrate/__init__.py,sha256=-JFdExGtr7UrwCpmjYvTfzFHqMjE7AmP0Rr3T53tBNU,10037
+flask_migrate/__pycache__/__init__.cpython-311.pyc,,
+flask_migrate/__pycache__/cli.cpython-311.pyc,,
+flask_migrate/cli.py,sha256=H-N4NNS5HyEB61HpUADLU8pW3naejyDPgeEbzEqG5-w,10298
+flask_migrate/templates/aioflask-multidb/README,sha256=Ek4cJqTaxneVjtkue--BXMlfpfp3MmJRjqoZvnSizww,43
+flask_migrate/templates/aioflask-multidb/__pycache__/env.cpython-311.pyc,,
+flask_migrate/templates/aioflask-multidb/alembic.ini.mako,sha256=SjYEmJKzz6K8QfuZWtLJAJWcCKOdRbfUhsVlpgv8ock,857
+flask_migrate/templates/aioflask-multidb/env.py,sha256=UcjeqkAbyUjTkuQFmCFPG7QOvqhco8-uGp8QEbto0T8,6573
+flask_migrate/templates/aioflask-multidb/script.py.mako,sha256=198VPxVEN3NZ3vHcRuCxSoI4XnOYirGWt01qkbPKoJw,1246
+flask_migrate/templates/aioflask/README,sha256=KKqWGl4YC2RqdOdq-y6quTDW0b7D_UZNHuM8glM1L-c,44
+flask_migrate/templates/aioflask/__pycache__/env.cpython-311.pyc,,
+flask_migrate/templates/aioflask/alembic.ini.mako,sha256=SjYEmJKzz6K8QfuZWtLJAJWcCKOdRbfUhsVlpgv8ock,857
+flask_migrate/templates/aioflask/env.py,sha256=m6ZtBhdpwuq89vVeLTWmNT-1NfJZqarC_hsquCdR9bw,3478
+flask_migrate/templates/aioflask/script.py.mako,sha256=8_xgA-gm_OhehnO7CiIijWgnm00ZlszEHtIHrAYFJl0,494
+flask_migrate/templates/flask-multidb/README,sha256=AfiP5foaV2odZxXxuUuSIS6YhkIpR7CsOo2mpuxwHdc,40
+flask_migrate/templates/flask-multidb/__pycache__/env.cpython-311.pyc,,
+flask_migrate/templates/flask-multidb/alembic.ini.mako,sha256=SjYEmJKzz6K8QfuZWtLJAJWcCKOdRbfUhsVlpgv8ock,857
+flask_migrate/templates/flask-multidb/env.py,sha256=F44iqsAxLTVBN_zD8CMUkdE7Aub4niHMmo5wl9mY4Uw,6190
+flask_migrate/templates/flask-multidb/script.py.mako,sha256=198VPxVEN3NZ3vHcRuCxSoI4XnOYirGWt01qkbPKoJw,1246
+flask_migrate/templates/flask/README,sha256=JL0NrjOrscPcKgRmQh1R3hlv1_rohDot0TvpmdM27Jk,41
+flask_migrate/templates/flask/__pycache__/env.cpython-311.pyc,,
+flask_migrate/templates/flask/alembic.ini.mako,sha256=SjYEmJKzz6K8QfuZWtLJAJWcCKOdRbfUhsVlpgv8ock,857
+flask_migrate/templates/flask/env.py,sha256=ibK1hsdOsOBzXNU2yQoAIza7f_EFzaVSWwON_NSpNzQ,3344
+flask_migrate/templates/flask/script.py.mako,sha256=8_xgA-gm_OhehnO7CiIijWgnm00ZlszEHtIHrAYFJl0,494
diff --git a/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/REQUESTED b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/WHEEL b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7
--- /dev/null
+++ b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/top_level.txt b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0652762c7b98cd2758190e2dadd48dcd1cc158a3
--- /dev/null
+++ b/venv/Lib/site-packages/Flask_Migrate-4.0.5.dist-info/top_level.txt
@@ -0,0 +1 @@
+flask_migrate
diff --git a/venv/Lib/site-packages/Mako-1.3.0.dist-info/INSTALLER b/venv/Lib/site-packages/Mako-1.3.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/Lib/site-packages/Mako-1.3.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/Mako-1.3.0.dist-info/LICENSE b/venv/Lib/site-packages/Mako-1.3.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..01bb1bdfb0d6167a541e5e2ab572d7e19f541471
--- /dev/null
+++ b/venv/Lib/site-packages/Mako-1.3.0.dist-info/LICENSE
@@ -0,0 +1,19 @@
+Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/venv/Lib/site-packages/Mako-1.3.0.dist-info/METADATA b/venv/Lib/site-packages/Mako-1.3.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..bcaa4c9561d37d31895377592574b4223d4421e2
--- /dev/null
+++ b/venv/Lib/site-packages/Mako-1.3.0.dist-info/METADATA
@@ -0,0 +1,95 @@
+Metadata-Version: 2.1
+Name: Mako
+Version: 1.3.0
+Summary: A super-fast templating language that borrows the best ideas from the existing templating languages.
+Home-page: https://www.makotemplates.org/
+Author: Mike Bayer
+Author-email: mike@zzzcomputing.com
+License: MIT
+Project-URL: Documentation, https://docs.makotemplates.org
+Project-URL: Issue Tracker, https://github.com/sqlalchemy/mako
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: MarkupSafe >=0.9.2
+Provides-Extra: babel
+Requires-Dist: Babel ; extra == 'babel'
+Provides-Extra: lingua
+Requires-Dist: lingua ; extra == 'lingua'
+Provides-Extra: testing
+Requires-Dist: pytest ; extra == 'testing'
+
+=========================
+Mako Templates for Python
+=========================
+
+Mako is a template library written in Python. It provides a familiar, non-XML 
+syntax which compiles into Python modules for maximum performance. Mako's 
+syntax and API borrows from the best ideas of many others, including Django
+templates, Cheetah, Myghty, and Genshi. Conceptually, Mako is an embedded 
+Python (i.e. Python Server Page) language, which refines the familiar ideas
+of componentized layout and inheritance to produce one of the most 
+straightforward and flexible models available, while also maintaining close 
+ties to Python calling and scoping semantics.
+
+Nutshell
+========
+
+::
+
+    <%inherit file="base.html"/>
+    <%
+        rows = [[v for v in range(0,10)] for row in range(0,10)]
+    %>
+    <table>
+        % for row in rows:
+            ${makerow(row)}
+        % endfor
+    </table>
+
+    <%def name="makerow(row)">
+        <tr>
+        % for name in row:
+            <td>${name}</td>\
+        % endfor
+        </tr>
+    </%def>
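+
+To render the template above from Python, a minimal sketch (assuming it is
+saved as ``page.html`` alongside ``base.html``)::
+
+    from mako.lookup import TemplateLookup
+
+    lookup = TemplateLookup(directories=['.'])
+    print(lookup.get_template('page.html').render())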
+
+Philosophy
+===========
+
+Python is a great scripting language. Don't reinvent the wheel... your templates can handle it!
+
+Documentation
+==============
+
+See documentation for Mako at https://docs.makotemplates.org/en/latest/
+
+License
+========
+
+Mako is licensed under an MIT-style license (see LICENSE).
+Other incorporated projects may be licensed under different licenses.
+All licenses allow for non-commercial and commercial use.
diff --git a/venv/Lib/site-packages/Mako-1.3.0.dist-info/RECORD b/venv/Lib/site-packages/Mako-1.3.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..eb28471560cad0d461d0a1a07b6886243d28104e
--- /dev/null
+++ b/venv/Lib/site-packages/Mako-1.3.0.dist-info/RECORD
@@ -0,0 +1,74 @@
+../../Scripts/mako-render.exe,sha256=ci7yA_2_jWVZk-66Ebgx8lxwUj8iNqHX1-jZYg8Sm9c,108409
+Mako-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Mako-1.3.0.dist-info/LICENSE,sha256=CjJuRI3SLyIN0weWZ33b3z3apkJ6l0T-qd6qUdnXT9k,1098
+Mako-1.3.0.dist-info/METADATA,sha256=gFHrHTKQc9wK1f8a2KoWXWWA7HRWg_qKydnHpOOn268,2900
+Mako-1.3.0.dist-info/RECORD,,
+Mako-1.3.0.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
+Mako-1.3.0.dist-info/entry_points.txt,sha256=LsKkUsOsJQYbJ2M72hZCm968wi5K8Ywb5uFxCuN8Obk,512
+Mako-1.3.0.dist-info/top_level.txt,sha256=LItdH8cDPetpUu8rUyBG3DObS6h9Gcpr9j_WLj2S-R0,5
+mako/__init__.py,sha256=gqzc5_SJeF8RrCRDx0wRjcNG0TM8M6COf3h88sRfGJA,242
+mako/__pycache__/__init__.cpython-311.pyc,,
+mako/__pycache__/_ast_util.cpython-311.pyc,,
+mako/__pycache__/ast.cpython-311.pyc,,
+mako/__pycache__/cache.cpython-311.pyc,,
+mako/__pycache__/cmd.cpython-311.pyc,,
+mako/__pycache__/codegen.cpython-311.pyc,,
+mako/__pycache__/compat.cpython-311.pyc,,
+mako/__pycache__/exceptions.cpython-311.pyc,,
+mako/__pycache__/filters.cpython-311.pyc,,
+mako/__pycache__/lexer.cpython-311.pyc,,
+mako/__pycache__/lookup.cpython-311.pyc,,
+mako/__pycache__/parsetree.cpython-311.pyc,,
+mako/__pycache__/pygen.cpython-311.pyc,,
+mako/__pycache__/pyparser.cpython-311.pyc,,
+mako/__pycache__/runtime.cpython-311.pyc,,
+mako/__pycache__/template.cpython-311.pyc,,
+mako/__pycache__/util.cpython-311.pyc,,
+mako/_ast_util.py,sha256=w-UTrb7VIVQvKvQvvPcg_gIlgJlnit2XEjlIm7JIrUM,20247
+mako/ast.py,sha256=qsUPb0VJFqbxXtsBnAIjyKUJVmNV2GWM5X7PuB2No1U,6642
+mako/cache.py,sha256=K4jECv4MyDyf6e-Y_NHpgNHmBBBfQglC6WQgJF5xtWA,7680
+mako/cmd.py,sha256=UBIPOgOOhwJJsWBDnEENqtkRIUwgU5JdMb7q-fkabkk,2813
+mako/codegen.py,sha256=yxvuOPoK4ojW_z2LNUyWGfxybVfrtNsP0L2pHPPvMZI,47307
+mako/compat.py,sha256=fEdnnyNvf9pLe9nHT0kp1rPcf7W6XLN8wvBvw5P9zuc,1820
+mako/exceptions.py,sha256=gzyhUpLgaCkRNW7pHosTswXYQ-Iamx_3p4Eg7-2ERfM,12530
+mako/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mako/ext/__pycache__/__init__.cpython-311.pyc,,
+mako/ext/__pycache__/autohandler.cpython-311.pyc,,
+mako/ext/__pycache__/babelplugin.cpython-311.pyc,,
+mako/ext/__pycache__/beaker_cache.cpython-311.pyc,,
+mako/ext/__pycache__/extract.cpython-311.pyc,,
+mako/ext/__pycache__/linguaplugin.cpython-311.pyc,,
+mako/ext/__pycache__/preprocessors.cpython-311.pyc,,
+mako/ext/__pycache__/pygmentplugin.cpython-311.pyc,,
+mako/ext/__pycache__/turbogears.cpython-311.pyc,,
+mako/ext/autohandler.py,sha256=4AzItz08tNY3NX9SRcknXPAWmZSP4KJE2OgV2I3uIQs,1885
+mako/ext/babelplugin.py,sha256=j11aT7ASthmej7Rwdh7Wx8gY7uuHEjVy1tDFks3uDmE,2091
+mako/ext/beaker_cache.py,sha256=vBAq9cKkadcTZ0C_GOKfoy5d-1oa8HJ3d_OvuK_GCyc,2578
+mako/ext/extract.py,sha256=emNVqQJNDMVWxpSmbIJWfiQpvBeyrlTb3vjf0LXLir0,4659
+mako/ext/linguaplugin.py,sha256=vluF2V6eW0y3C_QVTiTVnRLAMheje7FiTlg3p2mU3Lc,1935
+mako/ext/preprocessors.py,sha256=VdZ3-wW-5Bl1Gn7Ekzr7QBO_I7qToCLNOtmumUJh8TQ,576
+mako/ext/pygmentplugin.py,sha256=T57_uHzGi4ULI_wBOdXCq_JtrRKkp498OqQwwoHNL68,4753
+mako/ext/turbogears.py,sha256=34Q-pPPHvBTgbukcJ71YdxcEj0VY1vVeSS8HPCPS0tI,2141
+mako/filters.py,sha256=4PLsUBegJHypYF0KVEMNku5nxK4jYZN1HQfqpNDMMec,4658
+mako/lexer.py,sha256=gsrn8BHSk42K97RsCHy8xutdX2_HxpuvUDPnp85ogbA,15982
+mako/lookup.py,sha256=ueXJ7bIVvAxJaiawkSkv7xbGLrePwHzHvEtDlty-1Fs,12428
+mako/parsetree.py,sha256=eYCga2ONqrr_AJPTUtHg-LxBIiwSCZyh9Zs7tIZaOJo,19007
+mako/pygen.py,sha256=jyMpoPpuR4_XWitv4KmQDzJSdH0-w_nIRORIRAillOM,10416
+mako/pyparser.py,sha256=wpUtzfy0ssSs0gjh7VOIYZYQYZL5jODw4hvKID3Qbqg,7029
+mako/runtime.py,sha256=ZsUEN22nX3d3dECQujF69mBKDQS6yVv2nvz_0eTvFGg,27804
+mako/template.py,sha256=Jk45aBTC7JoeB3vFViJn3JZR6InA69oBuCE3Z2ZnkiM,23857
+mako/testing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mako/testing/__pycache__/__init__.cpython-311.pyc,,
+mako/testing/__pycache__/_config.cpython-311.pyc,,
+mako/testing/__pycache__/assertions.cpython-311.pyc,,
+mako/testing/__pycache__/config.cpython-311.pyc,,
+mako/testing/__pycache__/exclusions.cpython-311.pyc,,
+mako/testing/__pycache__/fixtures.cpython-311.pyc,,
+mako/testing/__pycache__/helpers.cpython-311.pyc,,
+mako/testing/_config.py,sha256=k-qpnsnbXUoN-ykMN5BRpg84i1x0p6UsAddKQnrIytU,3566
+mako/testing/assertions.py,sha256=pfbGl84QlW7QWGg3_lo3wP8XnBAVo9AjzNp2ajmn7FA,5161
+mako/testing/config.py,sha256=wmYVZfzGvOK3mJUZpzmgO8-iIgvaCH41Woi4yDpxq6E,323
+mako/testing/exclusions.py,sha256=_t6ADKdatk3f18tOfHV_ZY6u_ZwQsKphZ2MXJVSAOcI,1553
+mako/testing/fixtures.py,sha256=nEp7wTusf7E0n3Q-BHJW2s_t1vx0KB9poadQ1BmIJzE,3044
+mako/testing/helpers.py,sha256=kTaIg8OL1uvcuLptbRA_aJtGndIDDaxAzacYbv_Km1Q,1521
+mako/util.py,sha256=a1o6otEPqbE7KeO-8YJXv5ifTIG-Iu_k1NkI4GKAm44,10638
diff --git a/venv/Lib/site-packages/Mako-1.3.0.dist-info/WHEEL b/venv/Lib/site-packages/Mako-1.3.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..ba48cbcf9275ac6d88fe25821695e14d0a822e79
--- /dev/null
+++ b/venv/Lib/site-packages/Mako-1.3.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.3)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/venv/Lib/site-packages/Mako-1.3.0.dist-info/entry_points.txt b/venv/Lib/site-packages/Mako-1.3.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..30f31b2bf217432c8a80e4a097268c7ffd79a986
--- /dev/null
+++ b/venv/Lib/site-packages/Mako-1.3.0.dist-info/entry_points.txt
@@ -0,0 +1,18 @@
+[babel.extractors]
+mako = mako.ext.babelplugin:extract [babel]
+
+[console_scripts]
+mako-render = mako.cmd:cmdline
+
+[lingua.extractors]
+mako = mako.ext.linguaplugin:LinguaMakoExtractor [lingua]
+
+[pygments.lexers]
+css+mako = mako.ext.pygmentplugin:MakoCssLexer
+html+mako = mako.ext.pygmentplugin:MakoHtmlLexer
+js+mako = mako.ext.pygmentplugin:MakoJavascriptLexer
+mako = mako.ext.pygmentplugin:MakoLexer
+xml+mako = mako.ext.pygmentplugin:MakoXmlLexer
+
+[python.templating.engines]
+mako = mako.ext.turbogears:TGPlugin
diff --git a/venv/Lib/site-packages/Mako-1.3.0.dist-info/top_level.txt b/venv/Lib/site-packages/Mako-1.3.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2951cdd49d33f08a4f5af6213f315e571274c854
--- /dev/null
+++ b/venv/Lib/site-packages/Mako-1.3.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+mako
diff --git a/venv/Lib/site-packages/alembic-1.13.1.dist-info/INSTALLER b/venv/Lib/site-packages/alembic-1.13.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/Lib/site-packages/alembic-1.13.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/alembic-1.13.1.dist-info/LICENSE b/venv/Lib/site-packages/alembic-1.13.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..74b9ce34299e78b1abd96c7128cd036332b6a511
--- /dev/null
+++ b/venv/Lib/site-packages/alembic-1.13.1.dist-info/LICENSE
@@ -0,0 +1,19 @@
+Copyright 2009-2023 Michael Bayer.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic-1.13.1.dist-info/METADATA b/venv/Lib/site-packages/alembic-1.13.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..7a6884d9353033a739909c0dc0a2dfe80a8c3a78
--- /dev/null
+++ b/venv/Lib/site-packages/alembic-1.13.1.dist-info/METADATA
@@ -0,0 +1,159 @@
+Metadata-Version: 2.1
+Name: alembic
+Version: 1.13.1
+Summary: A database migration tool for SQLAlchemy.
+Home-page: https://alembic.sqlalchemy.org
+Author: Mike Bayer
+Author-email: mike_mp@zzzcomputing.com
+License: MIT
+Project-URL: Documentation, https://alembic.sqlalchemy.org/en/latest/
+Project-URL: Changelog, https://alembic.sqlalchemy.org/en/latest/changelog.html
+Project-URL: Source, https://github.com/sqlalchemy/alembic/
+Project-URL: Issue Tracker, https://github.com/sqlalchemy/alembic/issues/
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Environment :: Console
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Database :: Front-Ends
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: SQLAlchemy >=1.3.0
+Requires-Dist: Mako
+Requires-Dist: typing-extensions >=4
+Requires-Dist: importlib-metadata ; python_version < "3.9"
+Requires-Dist: importlib-resources ; python_version < "3.9"
+Provides-Extra: tz
+Requires-Dist: backports.zoneinfo ; (python_version < "3.9") and extra == 'tz'
+
+Alembic is a database migrations tool written by the author
+of `SQLAlchemy <http://www.sqlalchemy.org>`_.  A migrations tool
+offers the following functionality:
+
+* Can emit ALTER statements to a database in order to change
+  the structure of tables and other constructs
+* Provides a system whereby "migration scripts" may be constructed;
+  each script indicates a particular series of steps that can "upgrade" a
+  target database to a new version, and optionally a series of steps that can
+  "downgrade" similarly, doing the same steps in reverse.
+* Allows the scripts to execute in some sequential manner.
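+
+In practice those scripts are driven from the command line, for example::
+
+    alembic revision --autogenerate -m "add user table"
+    alembic upgrade head
+    alembic downgrade -1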
+
+The goals of Alembic are:
+
+* Very open ended and transparent configuration and operation.   A new
+  Alembic environment is generated from a set of templates which is selected
+  among a set of options when setup first occurs. The templates then deposit a
+  series of scripts that define fully how database connectivity is established
+  and how migration scripts are invoked; the migration scripts themselves are
+  generated from a template within that series of scripts. The scripts can
+  then be further customized to define exactly how databases will be
+  interacted with and what structure new migration files should take.
+* Full support for transactional DDL.   The default scripts ensure that all
+  migrations occur within a transaction - for those databases which support
+  this (Postgresql, Microsoft SQL Server), migrations can be tested with no
+  need to manually undo changes upon failure.
+* Minimalist script construction.  Basic operations like renaming
+  tables/columns, adding/removing columns, changing column attributes can be
+  performed through one line commands like alter_column(), rename_table(),
+  add_constraint(). There is no need to recreate full SQLAlchemy Table
+  structures for simple operations like these - the functions themselves
+  generate minimalist schema structures behind the scenes to achieve the given
+  DDL sequence.
+* "auto generation" of migrations. While real world migrations are far more
+  complex than what can be automatically determined, Alembic can still
+  eliminate the initial grunt work in generating new migration directives
+  from an altered schema.  The ``--autogenerate`` feature will inspect the
+  current status of a database using SQLAlchemy's schema inspection
+  capabilities, compare it to the current state of the database model as
+  specified in Python, and generate a series of "candidate" migrations,
+  rendering them into a new migration script as Python directives. The
+  developer then edits the new file, adding additional directives and data
+  migrations as needed, to produce a finished migration. Table and column
+  level changes can be detected, with constraints and indexes to follow as
+  well.
+* Full support for migrations generated as SQL scripts.   Those of us who
+  work in corporate environments know that direct access to DDL commands on a
+  production database is a rare privilege, and DBAs want textual SQL scripts.
+  Alembic's usage model and commands are oriented towards being able to run a
+  series of migrations into a textual output file as easily as it runs them
+  directly to a database. Care must be taken in this mode to not invoke other
+  operations that rely upon in-memory SELECTs of rows - Alembic tries to
+  provide helper constructs like bulk_insert() to help with data-oriented
+  operations that are compatible with script-based DDL.
+* Non-linear, dependency-graph versioning.   Scripts are given UUID
+  identifiers similarly to a DVCS, and the linkage of one script to the next
+  is achieved via human-editable markers within the scripts themselves.
+  The structure of a set of migration files is considered as a
+  directed-acyclic graph, meaning any migration file can be dependent
+  on any other arbitrary set of migration files, or none at
+  all.  Through this open-ended system, migration files can be organized
+  into branches, multiple roots, and mergepoints, without restriction.
+  Commands are provided to produce new branches, roots, and merges of
+  branches automatically.
+* Provide a library of ALTER constructs that can be used by any SQLAlchemy
+  application. The DDL constructs build upon SQLAlchemy's own DDLElement base
+  and can be used standalone by any application or script.
+* At long last, bring SQLite and its inability to ALTER things into the fold,
+  but in such a way that SQLite's very special workflow needs are accommodated
+  in an explicit way that makes the most of a bad situation, through the
+  concept of a "batch" migration, where multiple changes to a table can
+  be batched together to form a series of instructions for a single, subsequent
+  "move-and-copy" workflow.   You can even use "move-and-copy" workflow for
+  other databases, if you want to recreate a table in the background
+  on a busy system.
+
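+For illustration of the "batch" workflow described in the last point above, a
+minimal sketch (table and column names are placeholders)::
+
+    import sqlalchemy as sa
+    from alembic import op
+
+    def upgrade():
+        # On SQLite this rebuilds the table via the move-and-copy strategy.
+        with op.batch_alter_table('user') as batch_op:
+            batch_op.add_column(sa.Column('email', sa.String(length=256)))
+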
+Documentation and status of Alembic is at https://alembic.sqlalchemy.org/
+
+The SQLAlchemy Project
+======================
+
+Alembic is part of the `SQLAlchemy Project <https://www.sqlalchemy.org>`_ and
+adheres to the same standards and conventions as the core project.
+
+Development / Bug reporting / Pull requests
+___________________________________________
+
+Please refer to the
+`SQLAlchemy Community Guide <https://www.sqlalchemy.org/develop.html>`_ for
+guidelines on coding and participating in this project.
+
+Code of Conduct
+_______________
+
+Above all, SQLAlchemy places great emphasis on polite, thoughtful, and
+constructive communication between users and developers.
+Please see our current Code of Conduct at
+`Code of Conduct <https://www.sqlalchemy.org/codeofconduct.html>`_.
+
+License
+=======
+
+Alembic is distributed under the `MIT license
+<https://opensource.org/licenses/MIT>`_.
diff --git a/venv/Lib/site-packages/alembic-1.13.1.dist-info/RECORD b/venv/Lib/site-packages/alembic-1.13.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..bd62d91be350b51d5b0ac4838a24af6b623b6c48
--- /dev/null
+++ b/venv/Lib/site-packages/alembic-1.13.1.dist-info/RECORD
@@ -0,0 +1,150 @@
+../../Scripts/alembic.exe,sha256=KXWGNpqCYb_4UtGZiSVmkcasp0GF2JGG3ZdijkCBm3o,108409
+alembic-1.13.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+alembic-1.13.1.dist-info/LICENSE,sha256=soUmiob0QW6vTQWyrjiAwVb3xZqPk1pAK8BW6vszrwg,1058
+alembic-1.13.1.dist-info/METADATA,sha256=W1F2NBRkhqW55HiGmEIpdmiRt2skU5wwJd21UHFbSdQ,7390
+alembic-1.13.1.dist-info/RECORD,,
+alembic-1.13.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+alembic-1.13.1.dist-info/entry_points.txt,sha256=aykM30soxwGN0pB7etLc1q0cHJbL9dy46RnK9VX4LLw,48
+alembic-1.13.1.dist-info/top_level.txt,sha256=FwKWd5VsPFC8iQjpu1u9Cn-JnK3-V1RhUCmWqz1cl-s,8
+alembic/__init__.py,sha256=PMiQT_1tHyC_Od3GDBArBk7r14v8F62_xkzliPq9tLU,63
+alembic/__main__.py,sha256=373m7-TBh72JqrSMYviGrxCHZo-cnweM8AGF8A22PmY,78
+alembic/__pycache__/__init__.cpython-311.pyc,,
+alembic/__pycache__/__main__.cpython-311.pyc,,
+alembic/__pycache__/command.cpython-311.pyc,,
+alembic/__pycache__/config.cpython-311.pyc,,
+alembic/__pycache__/context.cpython-311.pyc,,
+alembic/__pycache__/environment.cpython-311.pyc,,
+alembic/__pycache__/migration.cpython-311.pyc,,
+alembic/__pycache__/op.cpython-311.pyc,,
+alembic/autogenerate/__init__.py,sha256=ntmUTXhjLm4_zmqIwyVaECdpPDn6_u1yM9vYk6-553E,543
+alembic/autogenerate/__pycache__/__init__.cpython-311.pyc,,
+alembic/autogenerate/__pycache__/api.cpython-311.pyc,,
+alembic/autogenerate/__pycache__/compare.cpython-311.pyc,,
+alembic/autogenerate/__pycache__/render.cpython-311.pyc,,
+alembic/autogenerate/__pycache__/rewriter.cpython-311.pyc,,
+alembic/autogenerate/api.py,sha256=Oc7MRtDhkSICsQ82fYP9bBMYaAjzzW2X_izM3AQU-OY,22171
+alembic/autogenerate/compare.py,sha256=3QLK2yCDW37bXbAIXcHGz4YOFlOW8bSfLHJ8bmzgG1M,44938
+alembic/autogenerate/render.py,sha256=uSbCpkh72mo00xGZ8CJa3teM_gqulCoNtxH0Ey8Ny1k,34939
+alembic/autogenerate/rewriter.py,sha256=uZWRkTYJoncoEJ5WY1QBRiozjyChqZDJPy4LtcRibjM,7846
+alembic/command.py,sha256=jWFNS-wPWA-Klfm0GsPfWh_8sPj4n7rKROJ0zrwhoR0,21712
+alembic/config.py,sha256=I12lm4V-AXSt-7nvub-Vtx5Zfa68pYP5xSrFQQd45rQ,22256
+alembic/context.py,sha256=hK1AJOQXJ29Bhn276GYcosxeG7pC5aZRT5E8c4bMJ4Q,195
+alembic/context.pyi,sha256=hUHbSnbSeEEMVkk0gDSXOq4_9edSjYzsjmmf-mL9Iao,31737
+alembic/ddl/__init__.py,sha256=Df8fy4Vn_abP8B7q3x8gyFwEwnLw6hs2Ljt_bV3EZWE,152
+alembic/ddl/__pycache__/__init__.cpython-311.pyc,,
+alembic/ddl/__pycache__/_autogen.cpython-311.pyc,,
+alembic/ddl/__pycache__/base.cpython-311.pyc,,
+alembic/ddl/__pycache__/impl.cpython-311.pyc,,
+alembic/ddl/__pycache__/mssql.cpython-311.pyc,,
+alembic/ddl/__pycache__/mysql.cpython-311.pyc,,
+alembic/ddl/__pycache__/oracle.cpython-311.pyc,,
+alembic/ddl/__pycache__/postgresql.cpython-311.pyc,,
+alembic/ddl/__pycache__/sqlite.cpython-311.pyc,,
+alembic/ddl/_autogen.py,sha256=0no9ywWP8gjvO57Ozc2naab4qNusVNn2fiJekjc275g,9179
+alembic/ddl/base.py,sha256=Jd7oPoAOGjOMcdMUIzSKnTjd8NKnTd7IjBXXyVpDCkU,9955
+alembic/ddl/impl.py,sha256=vkhkXFpLPJBG9jW2kv_sR5CC5czNd1ERLjLtfLuOFP0,28778
+alembic/ddl/mssql.py,sha256=ydvgBSaftKYjaBaMyqius66Ta4CICQSj79Og3Ed2atY,14219
+alembic/ddl/mysql.py,sha256=am221U_UK3wX33tNcXNiOObZV-Pa4CXuv7vN-epF9IU,16788
+alembic/ddl/oracle.py,sha256=TmoCq_FlbfyWAAk3e_q6mMQU0YmlfIcgKHpRfNMmgr0,6211
+alembic/ddl/postgresql.py,sha256=dcWLdDSqivzizVCce_H6RnOVAayPXDFfns-NC4-UaA8,29842
+alembic/ddl/sqlite.py,sha256=wLXhb8bJWRspKQTb-iVfepR4LXYgOuEbUWKX5qwDhIQ,7570
+alembic/environment.py,sha256=MM5lPayGT04H3aeng1H7GQ8HEAs3VGX5yy6mDLCPLT4,43
+alembic/migration.py,sha256=MV6Fju6rZtn2fTREKzXrCZM6aIBGII4OMZFix0X-GLs,41
+alembic/op.py,sha256=flHtcsVqOD-ZgZKK2pv-CJ5Cwh-KJ7puMUNXzishxLw,167
+alembic/op.pyi,sha256=8R6SJr1dQharU0blmMJJj23XFO_hk9ZmAQBJBQOeXRU,49816
+alembic/operations/__init__.py,sha256=e0KQSZAgLpTWvyvreB7DWg7RJV_MWSOPVDgCqsd2FzY,318
+alembic/operations/__pycache__/__init__.cpython-311.pyc,,
+alembic/operations/__pycache__/base.cpython-311.pyc,,
+alembic/operations/__pycache__/batch.cpython-311.pyc,,
+alembic/operations/__pycache__/ops.cpython-311.pyc,,
+alembic/operations/__pycache__/schemaobj.cpython-311.pyc,,
+alembic/operations/__pycache__/toimpl.cpython-311.pyc,,
+alembic/operations/base.py,sha256=LCx4NH5NA2NLWQFaZTqE_p2KgLtqJ76oVcp1Grj-zFM,74004
+alembic/operations/batch.py,sha256=YqtD4hJ3_RkFxvI7zbmBwxcLEyLHYyWQpsz4l5L85yI,26943
+alembic/operations/ops.py,sha256=2vtYFhYFDEVq158HwORfGTsobDM7c-t0lewuR7JKw7g,94353
+alembic/operations/schemaobj.py,sha256=vjoD57QvjbzzA-jyPIXulbOmb5_bGPtxoq58KsKI4Y0,9424
+alembic/operations/toimpl.py,sha256=SoNY2_gZX2baXTD-pNjpCWnON8D2QBSYQBIjo13-WKA,7115
+alembic/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+alembic/runtime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+alembic/runtime/__pycache__/__init__.cpython-311.pyc,,
+alembic/runtime/__pycache__/environment.cpython-311.pyc,,
+alembic/runtime/__pycache__/migration.cpython-311.pyc,,
+alembic/runtime/environment.py,sha256=9wSJaePNAXBXvirif_85ql7dSq4bXM1E6pSb2k-6uGI,41508
+alembic/runtime/migration.py,sha256=Yfv2fa11wiQ0WgoZaFldlWxCPq4dVDOCEOxST_-1VB0,50066
+alembic/script/__init__.py,sha256=lSj06O391Iy5avWAiq8SPs6N8RBgxkSPjP8wpXcNDGg,100
+alembic/script/__pycache__/__init__.cpython-311.pyc,,
+alembic/script/__pycache__/base.cpython-311.pyc,,
+alembic/script/__pycache__/revision.cpython-311.pyc,,
+alembic/script/__pycache__/write_hooks.cpython-311.pyc,,
+alembic/script/base.py,sha256=4gkppn2FKCYDoBgzGkTslcwdyxSabmHvGq0uGKulwbI,37586
+alembic/script/revision.py,sha256=sfnXQw2UwiXs0E6gEPHBKWuSsB5KyuxZPTrFn__hIEk,62060
+alembic/script/write_hooks.py,sha256=NGB6NGgfdf7HK6XNNpSKqUCfzxazj-NRUePgFx7MJSM,5036
+alembic/templates/async/README,sha256=ISVtAOvqvKk_5ThM5ioJE-lMkvf9IbknFUFVU_vPma4,58
+alembic/templates/async/__pycache__/env.cpython-311.pyc,,
+alembic/templates/async/alembic.ini.mako,sha256=uuhJETLWQuiYcs_jAOXHEjshEJ7VslEc1q4RRj0HWbE,3525
+alembic/templates/async/env.py,sha256=zbOCf3Y7w2lg92hxSwmG1MM_7y56i_oRH4AKp0pQBYo,2389
+alembic/templates/async/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1uy6HUS9NFvDgl93dMj8,635
+alembic/templates/generic/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38
+alembic/templates/generic/__pycache__/env.cpython-311.pyc,,
+alembic/templates/generic/alembic.ini.mako,sha256=sT7F852yN3c8X1-GKFlhuWExXxw9hY1eb1ZZ9flFSzc,3634
+alembic/templates/generic/env.py,sha256=TLRWOVW3Xpt_Tpf8JFzlnoPn_qoUu8UV77Y4o9XD6yI,2103
+alembic/templates/generic/script.py.mako,sha256=MEqL-2qATlST9TAOeYgscMn1uy6HUS9NFvDgl93dMj8,635
+alembic/templates/multidb/README,sha256=dWLDhnBgphA4Nzb7sNlMfCS3_06YqVbHhz-9O5JNqyI,606
+alembic/templates/multidb/__pycache__/env.cpython-311.pyc,,
+alembic/templates/multidb/alembic.ini.mako,sha256=mPh8JFJfWiGs6tMtL8_HAQ-Dz1QOoJgE5Vm76nIMqgU,3728
+alembic/templates/multidb/env.py,sha256=6zNjnW8mXGUk7erTsAvrfhvqoczJ-gagjVq1Ypg2YIQ,4230
+alembic/templates/multidb/script.py.mako,sha256=N06nMtNSwHkgl0EBXDyMt8njp9tlOesR583gfq21nbY,1090
+alembic/testing/__init__.py,sha256=kOxOh5nwmui9d-_CCq9WA4Udwy7ITjm453w74CTLZDo,1159
+alembic/testing/__pycache__/__init__.cpython-311.pyc,,
+alembic/testing/__pycache__/assertions.cpython-311.pyc,,
+alembic/testing/__pycache__/env.cpython-311.pyc,,
+alembic/testing/__pycache__/fixtures.cpython-311.pyc,,
+alembic/testing/__pycache__/requirements.cpython-311.pyc,,
+alembic/testing/__pycache__/schemacompare.cpython-311.pyc,,
+alembic/testing/__pycache__/util.cpython-311.pyc,,
+alembic/testing/__pycache__/warnings.cpython-311.pyc,,
+alembic/testing/assertions.py,sha256=1CbJk8c8-WO9eJ0XJ0jJvMsNRLUrXV41NOeIJUAlOBk,5015
+alembic/testing/env.py,sha256=zJacVb_z6uLs2U1TtkmnFH9P3_F-3IfYbVv4UEPOvfo,10754
+alembic/testing/fixtures.py,sha256=NyP4wE_dFN9ZzSGiBagRu1cdzkka03nwJYJYHYrrkSY,9112
+alembic/testing/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+alembic/testing/plugin/__pycache__/__init__.cpython-311.pyc,,
+alembic/testing/plugin/__pycache__/bootstrap.cpython-311.pyc,,
+alembic/testing/plugin/bootstrap.py,sha256=9C6wtjGrIVztZ928w27hsQE0KcjDLIUtUN3dvZKsMVk,50
+alembic/testing/requirements.py,sha256=dKeAO1l5TwBqXarJN-IPORlCqCJv-41Dj6oXoEikxHQ,5133
+alembic/testing/schemacompare.py,sha256=N5UqSNCOJetIKC4vKhpYzQEpj08XkdgIoqBmEPQ3tlc,4838
+alembic/testing/suite/__init__.py,sha256=MvE7-hwbaVN1q3NM-ztGxORU9dnIelUCINKqNxewn7Y,288
+alembic/testing/suite/__pycache__/__init__.cpython-311.pyc,,
+alembic/testing/suite/__pycache__/_autogen_fixtures.cpython-311.pyc,,
+alembic/testing/suite/__pycache__/test_autogen_comments.cpython-311.pyc,,
+alembic/testing/suite/__pycache__/test_autogen_computed.cpython-311.pyc,,
+alembic/testing/suite/__pycache__/test_autogen_diffs.cpython-311.pyc,,
+alembic/testing/suite/__pycache__/test_autogen_fks.cpython-311.pyc,,
+alembic/testing/suite/__pycache__/test_autogen_identity.cpython-311.pyc,,
+alembic/testing/suite/__pycache__/test_environment.cpython-311.pyc,,
+alembic/testing/suite/__pycache__/test_op.cpython-311.pyc,,
+alembic/testing/suite/_autogen_fixtures.py,sha256=cDq1pmzHe15S6dZPGNC6sqFaCQ3hLT_oPV2IDigUGQ0,9880
+alembic/testing/suite/test_autogen_comments.py,sha256=aEGqKUDw4kHjnDk298aoGcQvXJWmZXcIX_2FxH4cJK8,6283
+alembic/testing/suite/test_autogen_computed.py,sha256=qJeBpc8urnwTFvbwWrSTIbHVkRUuCXP-dKaNbUK2U2U,6077
+alembic/testing/suite/test_autogen_diffs.py,sha256=T4SR1n_kmcOKYhR4W1-dA0e5sddJ69DSVL2HW96kAkE,8394
+alembic/testing/suite/test_autogen_fks.py,sha256=AqFmb26Buex167HYa9dZWOk8x-JlB1OK3bwcvvjDFaU,32927
+alembic/testing/suite/test_autogen_identity.py,sha256=kcuqngG7qXAKPJDX4U8sRzPKHEJECHuZ0DtuaS6tVkk,5824
+alembic/testing/suite/test_environment.py,sha256=w9F0xnLEbALeR8k6_-Tz6JHvy91IqiTSypNasVzXfZQ,11877
+alembic/testing/suite/test_op.py,sha256=2XQCdm_NmnPxHGuGj7hmxMzIhKxXNotUsKdACXzE1mM,1343
+alembic/testing/util.py,sha256=CQrcQDA8fs_7ME85z5ydb-Bt70soIIID-qNY1vbR2dg,3350
+alembic/testing/warnings.py,sha256=RxA7x_8GseANgw07Us8JN_1iGbANxaw6_VitX2ZGQH4,1078
+alembic/util/__init__.py,sha256=KSZ7UT2YzH6CietgUtljVoE3QnGjoFKOi7RL5sgUxrk,1688
+alembic/util/__pycache__/__init__.cpython-311.pyc,,
+alembic/util/__pycache__/compat.cpython-311.pyc,,
+alembic/util/__pycache__/editor.cpython-311.pyc,,
+alembic/util/__pycache__/exc.cpython-311.pyc,,
+alembic/util/__pycache__/langhelpers.cpython-311.pyc,,
+alembic/util/__pycache__/messaging.cpython-311.pyc,,
+alembic/util/__pycache__/pyfiles.cpython-311.pyc,,
+alembic/util/__pycache__/sqla_compat.cpython-311.pyc,,
+alembic/util/compat.py,sha256=RjHdQa1NomU3Zlvgfvza0OMiSRQSLRL3xVl3OdUy2UE,2594
+alembic/util/editor.py,sha256=JIz6_BdgV8_oKtnheR6DZoB7qnrHrlRgWjx09AsTsUw,2546
+alembic/util/exc.py,sha256=KQTru4zcgAmN4IxLMwLFS56XToUewaXB7oOLcPNjPwg,98
+alembic/util/langhelpers.py,sha256=KYyOjFjJ26evPmrwhdTvLQNXN0bK7AIy5sRdKD91Fvg,10038
+alembic/util/messaging.py,sha256=BM5OCZ6qmLftFRw5yPSxj539_QmfVwNYoU8qYsDqoJY,3132
+alembic/util/pyfiles.py,sha256=zltVdcwEJJCPS2gHsQvkHkQakuF6wXiZ6zfwHbGNT0g,3489
+alembic/util/sqla_compat.py,sha256=toD1S63PgZ6iEteP9bwIf5E7DIUdQPo0UQ_Fn18qWnI,19536
diff --git a/venv/Lib/site-packages/alembic-1.13.1.dist-info/WHEEL b/venv/Lib/site-packages/alembic-1.13.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c
--- /dev/null
+++ b/venv/Lib/site-packages/alembic-1.13.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/venv/Lib/site-packages/alembic-1.13.1.dist-info/entry_points.txt b/venv/Lib/site-packages/alembic-1.13.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5945268170930d99b886c9a90beeaa0e025aae3b
--- /dev/null
+++ b/venv/Lib/site-packages/alembic-1.13.1.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[console_scripts]
+alembic = alembic.config:main
diff --git a/venv/Lib/site-packages/alembic-1.13.1.dist-info/top_level.txt b/venv/Lib/site-packages/alembic-1.13.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b5bd98d32e203d1fc0a298ee24c7b3d3a20e1ad6
--- /dev/null
+++ b/venv/Lib/site-packages/alembic-1.13.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+alembic
diff --git a/venv/Lib/site-packages/alembic/__init__.py b/venv/Lib/site-packages/alembic/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c153c8aaf05d75aadc3ddd793fe4ef5d8b3086e1
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/__init__.py
@@ -0,0 +1,4 @@
+from . import context
+from . import op
+
+__version__ = "1.13.1"
diff --git a/venv/Lib/site-packages/alembic/__main__.py b/venv/Lib/site-packages/alembic/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..af1b8e8702f4986b4e17ca129dab18f338803da4
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/__main__.py
@@ -0,0 +1,4 @@
+from .config import main
+
+if __name__ == "__main__":
+    main(prog="alembic")
diff --git a/venv/Lib/site-packages/alembic/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1bb33445a3388c19aa9b2fbacfe6452626484f26
Binary files /dev/null and b/venv/Lib/site-packages/alembic/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/__pycache__/__main__.cpython-311.pyc b/venv/Lib/site-packages/alembic/__pycache__/__main__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5b288d3d8d03376f368cac610fd25240a2a1d44
Binary files /dev/null and b/venv/Lib/site-packages/alembic/__pycache__/__main__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/__pycache__/command.cpython-311.pyc b/venv/Lib/site-packages/alembic/__pycache__/command.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a067f0a135a35fa1f4af5c86a7ca2b5b17e3577d
Binary files /dev/null and b/venv/Lib/site-packages/alembic/__pycache__/command.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/__pycache__/config.cpython-311.pyc b/venv/Lib/site-packages/alembic/__pycache__/config.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e4aeda8d5f178240184f572521e0ba6f018bd2c
Binary files /dev/null and b/venv/Lib/site-packages/alembic/__pycache__/config.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/__pycache__/context.cpython-311.pyc b/venv/Lib/site-packages/alembic/__pycache__/context.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1510252a70e248a90a4916761dc01a2bc364b2c
Binary files /dev/null and b/venv/Lib/site-packages/alembic/__pycache__/context.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/__pycache__/environment.cpython-311.pyc b/venv/Lib/site-packages/alembic/__pycache__/environment.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..803ae2b4c5cb19993708ea4c54324e149bc36aa6
Binary files /dev/null and b/venv/Lib/site-packages/alembic/__pycache__/environment.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/__pycache__/migration.cpython-311.pyc b/venv/Lib/site-packages/alembic/__pycache__/migration.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5471cfb33010d47d1d8712042c718d07bc661fb3
Binary files /dev/null and b/venv/Lib/site-packages/alembic/__pycache__/migration.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/__pycache__/op.cpython-311.pyc b/venv/Lib/site-packages/alembic/__pycache__/op.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..192f6cc2b3c0e6b08effb0c336093424c0b9171a
Binary files /dev/null and b/venv/Lib/site-packages/alembic/__pycache__/op.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/autogenerate/__init__.py b/venv/Lib/site-packages/alembic/autogenerate/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..445ddb25125aa63994052dd4ecea1362dc91656d
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/autogenerate/__init__.py
@@ -0,0 +1,10 @@
+from .api import _render_migration_diffs as _render_migration_diffs
+from .api import compare_metadata as compare_metadata
+from .api import produce_migrations as produce_migrations
+from .api import render_python_code as render_python_code
+from .api import RevisionContext as RevisionContext
+from .compare import _produce_net_changes as _produce_net_changes
+from .compare import comparators as comparators
+from .render import render_op_text as render_op_text
+from .render import renderers as renderers
+from .rewriter import Rewriter as Rewriter
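The redundant-looking `import X as X` aliases are the explicit re-export convention recognized by type checkers (mypy's no-implicit-reexport mode), marking these names as the package's public surface. Consumers can therefore import straight from the package:

    # Public autogenerate API, importable from the package itself.
    from alembic.autogenerate import compare_metadata, produce_migrations
    from alembic.autogenerate import render_python_code, RevisionContext
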
diff --git a/venv/Lib/site-packages/alembic/autogenerate/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7881822ca4d1dcd4451360c3a7ac94e0a109e22b
Binary files /dev/null and b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/autogenerate/__pycache__/api.cpython-311.pyc b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/api.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b1f46ca79ed28e334fbf113ec3a42ac3a74fc22
Binary files /dev/null and b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/api.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/autogenerate/__pycache__/compare.cpython-311.pyc b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/compare.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5dc04e6ce3edf668be41b418a75a30b910bf997b
Binary files /dev/null and b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/compare.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/autogenerate/__pycache__/render.cpython-311.pyc b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/render.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49bc186e0821a04825ae3a6210f7808bb67a2001
Binary files /dev/null and b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/render.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/autogenerate/__pycache__/rewriter.cpython-311.pyc b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/rewriter.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..755da582d0696322613308505604264970f0ecc4
Binary files /dev/null and b/venv/Lib/site-packages/alembic/autogenerate/__pycache__/rewriter.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/autogenerate/api.py b/venv/Lib/site-packages/alembic/autogenerate/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa8f32f65359c9c04f41ea24e21131beee2d8d2a
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/autogenerate/api.py
@@ -0,0 +1,650 @@
+from __future__ import annotations
+
+import contextlib
+from typing import Any
+from typing import Dict
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import inspect
+
+from . import compare
+from . import render
+from .. import util
+from ..operations import ops
+from ..util import sqla_compat
+
+"""Provide the 'autogenerate' feature which can produce migration operations
+automatically."""
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.engine import Inspector
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.schema import Table
+
+    from ..config import Config
+    from ..operations.ops import DowngradeOps
+    from ..operations.ops import MigrationScript
+    from ..operations.ops import UpgradeOps
+    from ..runtime.environment import NameFilterParentNames
+    from ..runtime.environment import NameFilterType
+    from ..runtime.environment import ProcessRevisionDirectiveFn
+    from ..runtime.environment import RenderItemFn
+    from ..runtime.migration import MigrationContext
+    from ..script.base import Script
+    from ..script.base import ScriptDirectory
+    from ..script.revision import _GetRevArg
+
+
+def compare_metadata(context: MigrationContext, metadata: MetaData) -> Any:
+    """Compare a database schema to that given in a
+    :class:`~sqlalchemy.schema.MetaData` instance.
+
+    The database connection is presented in the context
+    of a :class:`.MigrationContext` object, which
+    provides database connectivity as well as optional
+    comparison functions to use for datatypes and
+    server defaults - see the "autogenerate" arguments
+    at :meth:`.EnvironmentContext.configure`
+    for details on these.
+
+    The return format is a list of "diff" directives,
+    each representing individual differences::
+
+        from alembic.migration import MigrationContext
+        from alembic.autogenerate import compare_metadata
+        from sqlalchemy import (
+            create_engine,
+            MetaData,
+            Column,
+            Integer,
+            String,
+            Table,
+            text,
+        )
+        import pprint
+
+        engine = create_engine("sqlite://")
+
+        with engine.begin() as conn:
+            conn.execute(
+                text(
+                    '''
+                        create table foo (
+                            id integer not null primary key,
+                            old_data varchar,
+                            x integer
+                        )
+                    '''
+                )
+            )
+            conn.execute(text("create table bar (data varchar)"))
+
+        metadata = MetaData()
+        Table(
+            "foo",
+            metadata,
+            Column("id", Integer, primary_key=True),
+            Column("data", Integer),
+            Column("x", Integer, nullable=False),
+        )
+        Table("bat", metadata, Column("info", String))
+
+        mc = MigrationContext.configure(engine.connect())
+
+        diff = compare_metadata(mc, metadata)
+        pprint.pprint(diff, indent=2, width=20)
+
+    Output::
+
+        [
+            (
+                "add_table",
+                Table(
+                    "bat",
+                    MetaData(),
+                    Column("info", String(), table=<bat>),
+                    schema=None,
+                ),
+            ),
+            (
+                "remove_table",
+                Table(
+                    "bar",
+                    MetaData(),
+                    Column("data", VARCHAR(), table=<bar>),
+                    schema=None,
+                ),
+            ),
+            (
+                "add_column",
+                None,
+                "foo",
+                Column("data", Integer(), table=<foo>),
+            ),
+            [
+                (
+                    "modify_nullable",
+                    None,
+                    "foo",
+                    "x",
+                    {
+                        "existing_comment": None,
+                        "existing_server_default": False,
+                        "existing_type": INTEGER(),
+                    },
+                    True,
+                    False,
+                )
+            ],
+            (
+                "remove_column",
+                None,
+                "foo",
+                Column("old_data", VARCHAR(), table=<foo>),
+            ),
+        ]
+
+    :param context: a :class:`.MigrationContext`
+     instance.
+    :param metadata: a :class:`~sqlalchemy.schema.MetaData`
+     instance.
+
+    .. seealso::
+
+        :func:`.produce_migrations` - produces a :class:`.MigrationScript`
+        structure based on metadata comparison.
+
+    """
+
+    migration_script = produce_migrations(context, metadata)
+    assert migration_script.upgrade_ops is not None
+    return migration_script.upgrade_ops.as_diffs()
+
+
+def produce_migrations(
+    context: MigrationContext, metadata: MetaData
+) -> MigrationScript:
+    """Produce a :class:`.MigrationScript` structure based on schema
+    comparison.
+
+    This function does essentially what :func:`.compare_metadata` does,
+    but then runs the resulting list of diffs to produce the full
+    :class:`.MigrationScript` object.   For an example of what this looks like,
+    see the example in :ref:`customizing_revision`.
+
+    .. seealso::
+
+        :func:`.compare_metadata` - returns more fundamental "diff"
+        data from comparing a schema.
+
+    """
+
+    autogen_context = AutogenContext(context, metadata=metadata)
+
+    migration_script = ops.MigrationScript(
+        rev_id=None,
+        upgrade_ops=ops.UpgradeOps([]),
+        downgrade_ops=ops.DowngradeOps([]),
+    )
+
+    compare._populate_migration_script(autogen_context, migration_script)
+
+    return migration_script
+
+
+def render_python_code(
+    up_or_down_op: Union[UpgradeOps, DowngradeOps],
+    sqlalchemy_module_prefix: str = "sa.",
+    alembic_module_prefix: str = "op.",
+    render_as_batch: bool = False,
+    imports: Sequence[str] = (),
+    render_item: Optional[RenderItemFn] = None,
+    migration_context: Optional[MigrationContext] = None,
+    user_module_prefix: Optional[str] = None,
+) -> str:
+    """Render Python code given an :class:`.UpgradeOps` or
+    :class:`.DowngradeOps` object.
+
+    This is a convenience function that can be used to test the
+    autogenerate output of a user-defined :class:`.MigrationScript` structure.
+
+    :param up_or_down_op: :class:`.UpgradeOps` or :class:`.DowngradeOps` object
+    :param sqlalchemy_module_prefix: module prefix for SQLAlchemy objects
+    :param alembic_module_prefix: module prefix for Alembic constructs
+    :param render_as_batch: use "batch operations" style for rendering
+    :param imports: sequence of import symbols to add
+    :param render_item: callable to render items
+    :param migration_context: optional :class:`.MigrationContext`
+    :param user_module_prefix: optional string prefix for user-defined types
+
+     .. versionadded:: 1.11.0
+
+    """
+    opts = {
+        "sqlalchemy_module_prefix": sqlalchemy_module_prefix,
+        "alembic_module_prefix": alembic_module_prefix,
+        "render_item": render_item,
+        "render_as_batch": render_as_batch,
+        "user_module_prefix": user_module_prefix,
+    }
+
+    if migration_context is None:
+        from ..runtime.migration import MigrationContext
+        from sqlalchemy.engine.default import DefaultDialect
+
+        migration_context = MigrationContext.configure(
+            dialect=DefaultDialect()
+        )
+
+    autogen_context = AutogenContext(migration_context, opts=opts)
+    autogen_context.imports = set(imports)
+    return render._indent(
+        render._render_cmd_body(up_or_down_op, autogen_context)
+    )
+
+
+def _render_migration_diffs(
+    context: MigrationContext, template_args: Dict[Any, Any]
+) -> None:
+    """legacy, used by test_autogen_composition at the moment"""
+
+    autogen_context = AutogenContext(context)
+
+    upgrade_ops = ops.UpgradeOps([])
+    compare._produce_net_changes(autogen_context, upgrade_ops)
+
+    migration_script = ops.MigrationScript(
+        rev_id=None,
+        upgrade_ops=upgrade_ops,
+        downgrade_ops=upgrade_ops.reverse(),
+    )
+
+    render._render_python_into_templatevars(
+        autogen_context, migration_script, template_args
+    )
+
+
+class AutogenContext:
+    """Maintains configuration and state that's specific to an
+    autogenerate operation."""
+
+    metadata: Optional[MetaData] = None
+    """The :class:`~sqlalchemy.schema.MetaData` object
+    representing the destination.
+
+    This object is the one that is passed within ``env.py``
+    to the :paramref:`.EnvironmentContext.configure.target_metadata`
+    parameter.  It represents the structure of :class:`.Table` and other
+    objects as stated in the current database model, and represents the
+    destination structure for the database being examined.
+
+    While the :class:`~sqlalchemy.schema.MetaData` object is primarily
+    known as a collection of :class:`~sqlalchemy.schema.Table` objects,
+    it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary
+    that may be used by end-user schemes to store additional schema-level
+    objects that are to be compared in custom autogeneration schemes.
+
+    """
+
+    connection: Optional[Connection] = None
+    """The :class:`~sqlalchemy.engine.base.Connection` object currently
+    connected to the database backend being compared.
+
+    This is obtained from the :attr:`.MigrationContext.bind` and is
+    ultimately set up in the ``env.py`` script.
+
+    """
+
+    dialect: Optional[Dialect] = None
+    """The :class:`~sqlalchemy.engine.Dialect` object currently in use.
+
+    This is normally obtained from the
+    :attr:`~sqlalchemy.engine.base.Connection.dialect` attribute.
+
+    """
+
+    imports: Set[str] = None  # type: ignore[assignment]
+    """A ``set()`` which contains string Python import directives.
+
+    The directives are to be rendered into the ``${imports}`` section
+    of a script template.  The set is normally empty and can be modified
+    within hooks such as the
+    :paramref:`.EnvironmentContext.configure.render_item` hook.
+
+    .. seealso::
+
+        :ref:`autogen_render_types`
+
+    """
+
+    migration_context: MigrationContext = None  # type: ignore[assignment]
+    """The :class:`.MigrationContext` established by the ``env.py`` script."""
+
+    def __init__(
+        self,
+        migration_context: MigrationContext,
+        metadata: Optional[MetaData] = None,
+        opts: Optional[Dict[str, Any]] = None,
+        autogenerate: bool = True,
+    ) -> None:
+        if (
+            autogenerate
+            and migration_context is not None
+            and migration_context.as_sql
+        ):
+            raise util.CommandError(
+                "autogenerate can't use as_sql=True as it prevents querying "
+                "the database for schema information"
+            )
+
+        if opts is None:
+            opts = migration_context.opts
+
+        self.metadata = metadata = (
+            opts.get("target_metadata", None) if metadata is None else metadata
+        )
+
+        if (
+            autogenerate
+            and metadata is None
+            and migration_context is not None
+            and migration_context.script is not None
+        ):
+            raise util.CommandError(
+                "Can't proceed with --autogenerate option; environment "
+                "script %s does not provide "
+                "a MetaData object or sequence of objects to the context."
+                % (migration_context.script.env_py_location)
+            )
+
+        include_object = opts.get("include_object", None)
+        include_name = opts.get("include_name", None)
+
+        object_filters = []
+        name_filters = []
+        if include_object:
+            object_filters.append(include_object)
+        if include_name:
+            name_filters.append(include_name)
+
+        self._object_filters = object_filters
+        self._name_filters = name_filters
+
+        self.migration_context = migration_context
+        if self.migration_context is not None:
+            self.connection = self.migration_context.bind
+            self.dialect = self.migration_context.dialect
+
+        self.imports = set()
+        self.opts: Dict[str, Any] = opts
+        self._has_batch: bool = False
+
+    @util.memoized_property
+    def inspector(self) -> Inspector:
+        if self.connection is None:
+            raise TypeError(
+                "can't return inspector as this "
+                "AutogenContext has no database connection"
+            )
+        return inspect(self.connection)
+
+    @contextlib.contextmanager
+    def _within_batch(self) -> Iterator[None]:
+        self._has_batch = True
+        yield
+        self._has_batch = False
+
+    def run_name_filters(
+        self,
+        name: Optional[str],
+        type_: NameFilterType,
+        parent_names: NameFilterParentNames,
+    ) -> bool:
+        """Run the context's name filters and return True if the targets
+        should be part of the autogenerate operation.
+
+        This method should be run for every kind of name encountered within the
+        reflection side of an autogenerate operation, giving the environment
+        the chance to filter what names should be reflected as database
+        objects.  The filters here are produced directly via the
+        :paramref:`.EnvironmentContext.configure.include_name` parameter.
+
+        """
+        if "schema_name" in parent_names:
+            if type_ == "table":
+                table_name = name
+            else:
+                table_name = parent_names.get("table_name", None)
+            if table_name:
+                schema_name = parent_names["schema_name"]
+                if schema_name:
+                    parent_names["schema_qualified_table_name"] = "%s.%s" % (
+                        schema_name,
+                        table_name,
+                    )
+                else:
+                    parent_names["schema_qualified_table_name"] = table_name
+
+        for fn in self._name_filters:
+            if not fn(name, type_, parent_names):
+                return False
+
+        return True
+
+    def run_object_filters(
+        self,
+        object_: SchemaItem,
+        name: sqla_compat._ConstraintName,
+        type_: NameFilterType,
+        reflected: bool,
+        compare_to: Optional[SchemaItem],
+    ) -> bool:
+        """Run the context's object filters and return True if the targets
+        should be part of the autogenerate operation.
+
+        This method should be run for every kind of object encountered within
+        an autogenerate operation, giving the environment the chance
+        to filter what objects should be included in the comparison.
+        The filters here are produced directly via the
+        :paramref:`.EnvironmentContext.configure.include_object` parameter.
+
+        """
+        for fn in self._object_filters:
+            if not fn(object_, name, type_, reflected, compare_to):
+                return False
+
+        return True
+
+    run_filters = run_object_filters
+
+    @util.memoized_property
+    def sorted_tables(self) -> List[Table]:
+        """Return an aggregate of the :attr:`.MetaData.sorted_tables`
+        collection(s).
+
+        For a sequence of :class:`.MetaData` objects, this
+        concatenates the :attr:`.MetaData.sorted_tables` collection
+    for each individual :class:`.MetaData` in the order of the
+        sequence.  It does **not** collate the sorted tables collections.
+
+        """
+        result = []
+        for m in util.to_list(self.metadata):
+            result.extend(m.sorted_tables)
+        return result
+
+    @util.memoized_property
+    def table_key_to_table(self) -> Dict[str, Table]:
+        """Return an aggregate  of the :attr:`.MetaData.tables` dictionaries.
+
+        The :attr:`.MetaData.tables` collection is a dictionary of table key
+        to :class:`.Table`; this method aggregates the dictionary across
+        multiple :class:`.MetaData` objects into one dictionary.
+
+        Duplicate table keys are **not** supported; if two :class:`.MetaData`
+        objects contain the same table key, an exception is raised.
+
+        """
+        result: Dict[str, Table] = {}
+        for m in util.to_list(self.metadata):
+            intersect = set(result).intersection(set(m.tables))
+            if intersect:
+                raise ValueError(
+                    "Duplicate table keys across multiple "
+                    "MetaData objects: %s"
+                    % (", ".join('"%s"' % key for key in sorted(intersect)))
+                )
+
+            result.update(m.tables)
+        return result
+
+
+class RevisionContext:
+    """Maintains configuration and state that's specific to a revision
+    file generation operation."""
+
+    generated_revisions: List[MigrationScript]
+    process_revision_directives: Optional[ProcessRevisionDirectiveFn]
+
+    def __init__(
+        self,
+        config: Config,
+        script_directory: ScriptDirectory,
+        command_args: Dict[str, Any],
+        process_revision_directives: Optional[
+            ProcessRevisionDirectiveFn
+        ] = None,
+    ) -> None:
+        self.config = config
+        self.script_directory = script_directory
+        self.command_args = command_args
+        self.process_revision_directives = process_revision_directives
+        self.template_args = {
+            "config": config  # Let templates use config for
+            # e.g. multiple databases
+        }
+        self.generated_revisions = [self._default_revision()]
+
+    def _to_script(
+        self, migration_script: MigrationScript
+    ) -> Optional[Script]:
+        template_args: Dict[str, Any] = self.template_args.copy()
+
+        if getattr(migration_script, "_needs_render", False):
+            autogen_context = self._last_autogen_context
+
+            # clear out existing imports if we are doing multiple
+            # renders
+            autogen_context.imports = set()
+            if migration_script.imports:
+                autogen_context.imports.update(migration_script.imports)
+            render._render_python_into_templatevars(
+                autogen_context, migration_script, template_args
+            )
+
+        assert migration_script.rev_id is not None
+        return self.script_directory.generate_revision(
+            migration_script.rev_id,
+            migration_script.message,
+            refresh=True,
+            head=migration_script.head,
+            splice=migration_script.splice,
+            branch_labels=migration_script.branch_label,
+            version_path=migration_script.version_path,
+            depends_on=migration_script.depends_on,
+            **template_args,
+        )
+
+    def run_autogenerate(
+        self, rev: _GetRevArg, migration_context: MigrationContext
+    ) -> None:
+        self._run_environment(rev, migration_context, True)
+
+    def run_no_autogenerate(
+        self, rev: _GetRevArg, migration_context: MigrationContext
+    ) -> None:
+        self._run_environment(rev, migration_context, False)
+
+    def _run_environment(
+        self,
+        rev: _GetRevArg,
+        migration_context: MigrationContext,
+        autogenerate: bool,
+    ) -> None:
+        if autogenerate:
+            if self.command_args["sql"]:
+                raise util.CommandError(
+                    "Using --sql with --autogenerate does not make any sense"
+                )
+            if set(self.script_directory.get_revisions(rev)) != set(
+                self.script_directory.get_revisions("heads")
+            ):
+                raise util.CommandError("Target database is not up to date.")
+
+        upgrade_token = migration_context.opts["upgrade_token"]
+        downgrade_token = migration_context.opts["downgrade_token"]
+
+        migration_script = self.generated_revisions[-1]
+        if not getattr(migration_script, "_needs_render", False):
+            migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token
+            migration_script.downgrade_ops_list[
+                -1
+            ].downgrade_token = downgrade_token
+            migration_script._needs_render = True
+        else:
+            migration_script._upgrade_ops.append(
+                ops.UpgradeOps([], upgrade_token=upgrade_token)
+            )
+            migration_script._downgrade_ops.append(
+                ops.DowngradeOps([], downgrade_token=downgrade_token)
+            )
+
+        autogen_context = AutogenContext(
+            migration_context, autogenerate=autogenerate
+        )
+        self._last_autogen_context: AutogenContext = autogen_context
+
+        if autogenerate:
+            compare._populate_migration_script(
+                autogen_context, migration_script
+            )
+
+        if self.process_revision_directives:
+            self.process_revision_directives(
+                migration_context, rev, self.generated_revisions
+            )
+
+        hook = migration_context.opts["process_revision_directives"]
+        if hook:
+            hook(migration_context, rev, self.generated_revisions)
+
+        for migration_script in self.generated_revisions:
+            migration_script._needs_render = True
+
+    def _default_revision(self) -> MigrationScript:
+        command_args: Dict[str, Any] = self.command_args
+        op = ops.MigrationScript(
+            rev_id=command_args["rev_id"] or util.rev_id(),
+            message=command_args["message"],
+            upgrade_ops=ops.UpgradeOps([]),
+            downgrade_ops=ops.DowngradeOps([]),
+            head=command_args["head"],
+            splice=command_args["splice"],
+            branch_label=command_args["branch_label"],
+            version_path=command_args["version_path"],
+            depends_on=command_args["depends_on"],
+        )
+        return op
+
+    def generate_scripts(self) -> Iterator[Optional[Script]]:
+        for generated_revision in self.generated_revisions:
+            yield self._to_script(generated_revision)
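A hedged end-to-end sketch tying together the three public functions defined in api.py above, run against an empty in-memory SQLite database; the `widget` table and its columns are illustrative only:

    from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine

    from alembic.autogenerate import (
        compare_metadata,
        produce_migrations,
        render_python_code,
    )
    from alembic.migration import MigrationContext

    engine = create_engine("sqlite://")

    metadata = MetaData()
    Table(
        "widget",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50), nullable=False),
    )

    with engine.connect() as conn:
        mc = MigrationContext.configure(conn)

        # Raw diff tuples, e.g. [("add_table", Table("widget", ...))]
        print(compare_metadata(mc, metadata))

        # The same comparison wrapped in a MigrationScript structure ...
        script = produce_migrations(mc, metadata)

        # ... rendered as the body of an upgrade() function.
        print(render_python_code(script.upgrade_ops))

This rendering step is what `alembic revision --autogenerate` ultimately performs via `RevisionContext`, which pipes the rendered operations into the revision file template.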
diff --git a/venv/Lib/site-packages/alembic/autogenerate/compare.py b/venv/Lib/site-packages/alembic/autogenerate/compare.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcef531a544eefdc3baacff66aa5fa70229f9b58
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/autogenerate/compare.py
@@ -0,0 +1,1329 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import contextlib
+import logging
+import re
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import Iterator
+from typing import Mapping
+from typing import Optional
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy import event
+from sqlalchemy import inspect
+from sqlalchemy import schema as sa_schema
+from sqlalchemy import text
+from sqlalchemy import types as sqltypes
+from sqlalchemy.sql import expression
+from sqlalchemy.sql.schema import ForeignKeyConstraint
+from sqlalchemy.sql.schema import Index
+from sqlalchemy.sql.schema import UniqueConstraint
+from sqlalchemy.util import OrderedSet
+
+from .. import util
+from ..ddl._autogen import is_index_sig
+from ..ddl._autogen import is_uq_sig
+from ..operations import ops
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.engine.reflection import Inspector
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Table
+
+    from alembic.autogenerate.api import AutogenContext
+    from alembic.ddl.impl import DefaultImpl
+    from alembic.operations.ops import AlterColumnOp
+    from alembic.operations.ops import MigrationScript
+    from alembic.operations.ops import ModifyTableOps
+    from alembic.operations.ops import UpgradeOps
+    from ..ddl._autogen import _constraint_sig
+
+
+log = logging.getLogger(__name__)
+
+
+def _populate_migration_script(
+    autogen_context: AutogenContext, migration_script: MigrationScript
+) -> None:
+    upgrade_ops = migration_script.upgrade_ops_list[-1]
+    downgrade_ops = migration_script.downgrade_ops_list[-1]
+
+    _produce_net_changes(autogen_context, upgrade_ops)
+    upgrade_ops.reverse_into(downgrade_ops)
+
+
+comparators = util.Dispatcher(uselist=True)
+
+
+def _produce_net_changes(
+    autogen_context: AutogenContext, upgrade_ops: UpgradeOps
+) -> None:
+    connection = autogen_context.connection
+    assert connection is not None
+    include_schemas = autogen_context.opts.get("include_schemas", False)
+
+    inspector: Inspector = inspect(connection)
+
+    default_schema = connection.dialect.default_schema_name
+    schemas: Set[Optional[str]]
+    if include_schemas:
+        schemas = set(inspector.get_schema_names())
+        # replace default schema name with None
+        schemas.discard("information_schema")
+        # replace the "default" schema with None
+        schemas.discard(default_schema)
+        schemas.add(None)
+    else:
+        schemas = {None}
+
+    schemas = {
+        s for s in schemas if autogen_context.run_name_filters(s, "schema", {})
+    }
+
+    assert autogen_context.dialect is not None
+    comparators.dispatch("schema", autogen_context.dialect.name)(
+        autogen_context, upgrade_ops, schemas
+    )
+
+
+@comparators.dispatch_for("schema")
+def _autogen_for_tables(
+    autogen_context: AutogenContext,
+    upgrade_ops: UpgradeOps,
+    schemas: Union[Set[None], Set[Optional[str]]],
+) -> None:
+    inspector = autogen_context.inspector
+
+    conn_table_names: Set[Tuple[Optional[str], str]] = set()
+
+    version_table_schema = (
+        autogen_context.migration_context.version_table_schema
+    )
+    version_table = autogen_context.migration_context.version_table
+
+    for schema_name in schemas:
+        tables = set(inspector.get_table_names(schema=schema_name))
+        if schema_name == version_table_schema:
+            tables = tables.difference(
+                [autogen_context.migration_context.version_table]
+            )
+
+        conn_table_names.update(
+            (schema_name, tname)
+            for tname in tables
+            if autogen_context.run_name_filters(
+                tname, "table", {"schema_name": schema_name}
+            )
+        )
+
+    metadata_table_names = OrderedSet(
+        [(table.schema, table.name) for table in autogen_context.sorted_tables]
+    ).difference([(version_table_schema, version_table)])
+
+    _compare_tables(
+        conn_table_names,
+        metadata_table_names,
+        inspector,
+        upgrade_ops,
+        autogen_context,
+    )
+
+
+def _compare_tables(
+    conn_table_names: set,
+    metadata_table_names: set,
+    inspector: Inspector,
+    upgrade_ops: UpgradeOps,
+    autogen_context: AutogenContext,
+) -> None:
+    default_schema = inspector.bind.dialect.default_schema_name
+
+    # tables coming from the connection will not have "schema"
+    # set if it matches default_schema_name; so we need a list
+    # of table names from local metadata that also have "None" if schema
+    # == default_schema_name.  Most setups will be like this anyway but
+    # some are not (see #170)
+    metadata_table_names_no_dflt_schema = OrderedSet(
+        [
+            (schema if schema != default_schema else None, tname)
+            for schema, tname in metadata_table_names
+        ]
+    )
+
+    # to adjust for the MetaData collection storing the tables either
+    # as "schemaname.tablename" or just "tablename", create a new lookup
+    # which will match the "non-default-schema" keys to the Table object.
+    tname_to_table = {
+        no_dflt_schema: autogen_context.table_key_to_table[
+            sa_schema._get_table_key(tname, schema)
+        ]
+        for no_dflt_schema, (schema, tname) in zip(
+            metadata_table_names_no_dflt_schema, metadata_table_names
+        )
+    }
+    metadata_table_names = metadata_table_names_no_dflt_schema
+
+    for s, tname in metadata_table_names.difference(conn_table_names):
+        name = "%s.%s" % (s, tname) if s else tname
+        metadata_table = tname_to_table[(s, tname)]
+        if autogen_context.run_object_filters(
+            metadata_table, tname, "table", False, None
+        ):
+            upgrade_ops.ops.append(
+                ops.CreateTableOp.from_table(metadata_table)
+            )
+            log.info("Detected added table %r", name)
+            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
+
+            comparators.dispatch("table")(
+                autogen_context,
+                modify_table_ops,
+                s,
+                tname,
+                None,
+                metadata_table,
+            )
+            if not modify_table_ops.is_empty():
+                upgrade_ops.ops.append(modify_table_ops)
+
+    removal_metadata = sa_schema.MetaData()
+    for s, tname in conn_table_names.difference(metadata_table_names):
+        name = sa_schema._get_table_key(tname, s)
+        exists = name in removal_metadata.tables
+        t = sa_schema.Table(tname, removal_metadata, schema=s)
+
+        if not exists:
+            event.listen(
+                t,
+                "column_reflect",
+                # fmt: off
+                autogen_context.migration_context.impl.
+                _compat_autogen_column_reflect(inspector),
+                # fmt: on
+            )
+            sqla_compat._reflect_table(inspector, t)
+        if autogen_context.run_object_filters(t, tname, "table", True, None):
+            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
+
+            comparators.dispatch("table")(
+                autogen_context, modify_table_ops, s, tname, t, None
+            )
+            if not modify_table_ops.is_empty():
+                upgrade_ops.ops.append(modify_table_ops)
+
+            upgrade_ops.ops.append(ops.DropTableOp.from_table(t))
+            log.info("Detected removed table %r", name)
+
+    existing_tables = conn_table_names.intersection(metadata_table_names)
+
+    existing_metadata = sa_schema.MetaData()
+    conn_column_info = {}
+    for s, tname in existing_tables:
+        name = sa_schema._get_table_key(tname, s)
+        exists = name in existing_metadata.tables
+        t = sa_schema.Table(tname, existing_metadata, schema=s)
+        if not exists:
+            event.listen(
+                t,
+                "column_reflect",
+                # fmt: off
+                autogen_context.migration_context.impl.
+                _compat_autogen_column_reflect(inspector),
+                # fmt: on
+            )
+            sqla_compat._reflect_table(inspector, t)
+        conn_column_info[(s, tname)] = t
+
+    for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])):
+        s = s or None
+        name = "%s.%s" % (s, tname) if s else tname
+        metadata_table = tname_to_table[(s, tname)]
+        conn_table = existing_metadata.tables[name]
+
+        if autogen_context.run_object_filters(
+            metadata_table, tname, "table", False, conn_table
+        ):
+            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
+            with _compare_columns(
+                s,
+                tname,
+                conn_table,
+                metadata_table,
+                modify_table_ops,
+                autogen_context,
+                inspector,
+            ):
+                comparators.dispatch("table")(
+                    autogen_context,
+                    modify_table_ops,
+                    s,
+                    tname,
+                    conn_table,
+                    metadata_table,
+                )
+
+            if not modify_table_ops.is_empty():
+                upgrade_ops.ops.append(modify_table_ops)
+
+
+_IndexColumnSortingOps: Mapping[str, Any] = util.immutabledict(
+    {
+        "asc": expression.asc,
+        "desc": expression.desc,
+        "nulls_first": expression.nullsfirst,
+        "nulls_last": expression.nullslast,
+        "nullsfirst": expression.nullsfirst,  # 1_3 name
+        "nullslast": expression.nullslast,  # 1_3 name
+    }
+)
+
+
+def _make_index(
+    impl: DefaultImpl, params: Dict[str, Any], conn_table: Table
+) -> Optional[Index]:
+    exprs: list[Union[Column[Any], TextClause]] = []
+    sorting = params.get("column_sorting")
+
+    for num, col_name in enumerate(params["column_names"]):
+        item: Union[Column[Any], TextClause]
+        if col_name is None:
+            assert "expressions" in params
+            name = params["expressions"][num]
+            item = text(name)
+        else:
+            name = col_name
+            item = conn_table.c[col_name]
+        if sorting and name in sorting:
+            for operator in sorting[name]:
+                if operator in _IndexColumnSortingOps:
+                    item = _IndexColumnSortingOps[operator](item)
+        exprs.append(item)
+    ix = sa_schema.Index(
+        params["name"],
+        *exprs,
+        unique=params["unique"],
+        _table=conn_table,
+        **impl.adjust_reflected_dialect_options(params, "index"),
+    )
+    if "duplicates_constraint" in params:
+        ix.info["duplicates_constraint"] = params["duplicates_constraint"]
+    return ix
+
+
+def _make_unique_constraint(
+    impl: DefaultImpl, params: Dict[str, Any], conn_table: Table
+) -> UniqueConstraint:
+    uq = sa_schema.UniqueConstraint(
+        *[conn_table.c[cname] for cname in params["column_names"]],
+        name=params["name"],
+        **impl.adjust_reflected_dialect_options(params, "unique_constraint"),
+    )
+    if "duplicates_index" in params:
+        uq.info["duplicates_index"] = params["duplicates_index"]
+
+    return uq
+
+
+def _make_foreign_key(
+    params: Dict[str, Any], conn_table: Table
+) -> ForeignKeyConstraint:
+    tname = params["referred_table"]
+    if params["referred_schema"]:
+        tname = "%s.%s" % (params["referred_schema"], tname)
+
+    options = params.get("options", {})
+
+    const = sa_schema.ForeignKeyConstraint(
+        [conn_table.c[cname] for cname in params["constrained_columns"]],
+        ["%s.%s" % (tname, n) for n in params["referred_columns"]],
+        onupdate=options.get("onupdate"),
+        ondelete=options.get("ondelete"),
+        deferrable=options.get("deferrable"),
+        initially=options.get("initially"),
+        name=params["name"],
+    )
+    # needed by 0.7
+    conn_table.append_constraint(const)
+    return const
+
+
+@contextlib.contextmanager
+def _compare_columns(
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    conn_table: Table,
+    metadata_table: Table,
+    modify_table_ops: ModifyTableOps,
+    autogen_context: AutogenContext,
+    inspector: Inspector,
+) -> Iterator[None]:
+    name = "%s.%s" % (schema, tname) if schema else tname
+    metadata_col_names = OrderedSet(
+        c.name for c in metadata_table.c if not c.system
+    )
+    metadata_cols_by_name = {
+        c.name: c for c in metadata_table.c if not c.system
+    }
+
+    conn_col_names = {
+        c.name: c
+        for c in conn_table.c
+        if autogen_context.run_name_filters(
+            c.name, "column", {"table_name": tname, "schema_name": schema}
+        )
+    }
+
+    for cname in metadata_col_names.difference(conn_col_names):
+        if autogen_context.run_object_filters(
+            metadata_cols_by_name[cname], cname, "column", False, None
+        ):
+            modify_table_ops.ops.append(
+                ops.AddColumnOp.from_column_and_tablename(
+                    schema, tname, metadata_cols_by_name[cname]
+                )
+            )
+            log.info("Detected added column '%s.%s'", name, cname)
+
+    for colname in metadata_col_names.intersection(conn_col_names):
+        metadata_col = metadata_cols_by_name[colname]
+        conn_col = conn_table.c[colname]
+        if not autogen_context.run_object_filters(
+            metadata_col, colname, "column", False, conn_col
+        ):
+            continue
+        alter_column_op = ops.AlterColumnOp(tname, colname, schema=schema)
+
+        comparators.dispatch("column")(
+            autogen_context,
+            alter_column_op,
+            schema,
+            tname,
+            colname,
+            conn_col,
+            metadata_col,
+        )
+
+        if alter_column_op.has_changes():
+            modify_table_ops.ops.append(alter_column_op)
+
+    yield
+
+    for cname in set(conn_col_names).difference(metadata_col_names):
+        if autogen_context.run_object_filters(
+            conn_table.c[cname], cname, "column", True, None
+        ):
+            modify_table_ops.ops.append(
+                ops.DropColumnOp.from_column_and_tablename(
+                    schema, tname, conn_table.c[cname]
+                )
+            )
+            log.info("Detected removed column '%s.%s'", name, cname)
+
+
+_C = TypeVar("_C", bound=Union[UniqueConstraint, ForeignKeyConstraint, Index])
+
+
+@comparators.dispatch_for("table")
+def _compare_indexes_and_uniques(
+    autogen_context: AutogenContext,
+    modify_ops: ModifyTableOps,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    conn_table: Optional[Table],
+    metadata_table: Optional[Table],
+) -> None:
+    inspector = autogen_context.inspector
+    is_create_table = conn_table is None
+    is_drop_table = metadata_table is None
+    impl = autogen_context.migration_context.impl
+
+    # 1a. get raw indexes and unique constraints from metadata ...
+    if metadata_table is not None:
+        metadata_unique_constraints = {
+            uq
+            for uq in metadata_table.constraints
+            if isinstance(uq, sa_schema.UniqueConstraint)
+        }
+        metadata_indexes = set(metadata_table.indexes)
+    else:
+        metadata_unique_constraints = set()
+        metadata_indexes = set()
+
+    conn_uniques = conn_indexes = frozenset()  # type:ignore[var-annotated]
+
+    supports_unique_constraints = False
+
+    unique_constraints_duplicate_unique_indexes = False
+
+    if conn_table is not None:
+        # 1b. ... and from connection, if the table exists
+        try:
+            conn_uniques = inspector.get_unique_constraints(  # type:ignore[assignment] # noqa
+                tname, schema=schema
+            )
+            supports_unique_constraints = True
+        except NotImplementedError:
+            pass
+        except TypeError:
+            # number of arguments is off for the base
+            # method in SQLAlchemy due to the cache decorator
+            # not being present
+            pass
+        else:
+            conn_uniques = [  # type:ignore[assignment]
+                uq
+                for uq in conn_uniques
+                if autogen_context.run_name_filters(
+                    uq["name"],
+                    "unique_constraint",
+                    {"table_name": tname, "schema_name": schema},
+                )
+            ]
+            for uq in conn_uniques:
+                if uq.get("duplicates_index"):
+                    unique_constraints_duplicate_unique_indexes = True
+        try:
+            conn_indexes = inspector.get_indexes(  # type:ignore[assignment]
+                tname, schema=schema
+            )
+        except NotImplementedError:
+            pass
+        else:
+            conn_indexes = [  # type:ignore[assignment]
+                ix
+                for ix in conn_indexes
+                if autogen_context.run_name_filters(
+                    ix["name"],
+                    "index",
+                    {"table_name": tname, "schema_name": schema},
+                )
+            ]
+
+        # 2. convert conn-level objects from raw inspector records
+        # into schema objects
+        if is_drop_table:
+            # for DROP TABLE uniques are inline, don't need them
+            conn_uniques = set()  # type:ignore[assignment]
+        else:
+            conn_uniques = {  # type:ignore[assignment]
+                _make_unique_constraint(impl, uq_def, conn_table)
+                for uq_def in conn_uniques
+            }
+
+        conn_indexes = {  # type:ignore[assignment]
+            index
+            for index in (
+                _make_index(impl, ix, conn_table) for ix in conn_indexes
+            )
+            if index is not None
+        }
+
+    # 2a. if the dialect dupes unique indexes as unique constraints
+    # (mysql and oracle), correct for that
+
+    if unique_constraints_duplicate_unique_indexes:
+        _correct_for_uq_duplicates_uix(
+            conn_uniques,
+            conn_indexes,
+            metadata_unique_constraints,
+            metadata_indexes,
+            autogen_context.dialect,
+            impl,
+        )
+
+    # 3. give the dialect a chance to omit indexes and constraints that
+    # we know are either added implicitly by the DB or that the DB
+    # can't accurately report on
+    impl.correct_for_autogen_constraints(
+        conn_uniques,  # type: ignore[arg-type]
+        conn_indexes,  # type: ignore[arg-type]
+        metadata_unique_constraints,
+        metadata_indexes,
+    )
+
+    # 4. organize the constraints into "signature" collections, the
+    # _constraint_sig() objects provide a consistent facade over both
+    # Index and UniqueConstraint so we can easily work with them
+    # interchangeably
+    metadata_unique_constraints_sig = {
+        impl._create_metadata_constraint_sig(uq)
+        for uq in metadata_unique_constraints
+    }
+
+    metadata_indexes_sig = {
+        impl._create_metadata_constraint_sig(ix) for ix in metadata_indexes
+    }
+
+    conn_unique_constraints = {
+        impl._create_reflected_constraint_sig(uq) for uq in conn_uniques
+    }
+
+    conn_indexes_sig = {
+        impl._create_reflected_constraint_sig(ix) for ix in conn_indexes
+    }
+
+    # 5. index things by name, for those objects that have names
+    metadata_names = {
+        cast(str, c.md_name_to_sql_name(autogen_context)): c
+        for c in metadata_unique_constraints_sig.union(metadata_indexes_sig)
+        if c.is_named
+    }
+
+    conn_uniques_by_name: Dict[sqla_compat._ConstraintName, _constraint_sig]
+    conn_indexes_by_name: Dict[sqla_compat._ConstraintName, _constraint_sig]
+
+    conn_uniques_by_name = {c.name: c for c in conn_unique_constraints}
+    conn_indexes_by_name = {c.name: c for c in conn_indexes_sig}
+    conn_names = {
+        c.name: c
+        for c in conn_unique_constraints.union(conn_indexes_sig)
+        if sqla_compat.constraint_name_string(c.name)
+    }
+
+    doubled_constraints = {
+        name: (conn_uniques_by_name[name], conn_indexes_by_name[name])
+        for name in set(conn_uniques_by_name).intersection(
+            conn_indexes_by_name
+        )
+    }
+
+    # 6. index things by "column signature", to help with unnamed unique
+    # constraints.
+    conn_uniques_by_sig = {uq.unnamed: uq for uq in conn_unique_constraints}
+    metadata_uniques_by_sig = {
+        uq.unnamed: uq for uq in metadata_unique_constraints_sig
+    }
+    unnamed_metadata_uniques = {
+        uq.unnamed: uq
+        for uq in metadata_unique_constraints_sig
+        if not sqla_compat._constraint_is_named(
+            uq.const, autogen_context.dialect
+        )
+    }
+
+    # assumptions:
+    # 1. a unique constraint or an index from the connection *always*
+    #    has a name.
+    # 2. an index on the metadata side *always* has a name.
+    # 3. a unique constraint on the metadata side *might* have a name.
+    # 4. The backend may double up indexes as unique constraints and
+    #    vice versa (e.g. MySQL, Postgresql)
+
+    def obj_added(obj: _constraint_sig):
+        if is_index_sig(obj):
+            if autogen_context.run_object_filters(
+                obj.const, obj.name, "index", False, None
+            ):
+                modify_ops.ops.append(ops.CreateIndexOp.from_index(obj.const))
+                log.info(
+                    "Detected added index '%r' on '%s'",
+                    obj.name,
+                    obj.column_names,
+                )
+        elif is_uq_sig(obj):
+            if not supports_unique_constraints:
+                # can't report unique indexes as added if we don't
+                # detect them
+                return
+            if is_create_table or is_drop_table:
+                # unique constraints are created inline with table defs
+                return
+            if autogen_context.run_object_filters(
+                obj.const, obj.name, "unique_constraint", False, None
+            ):
+                modify_ops.ops.append(
+                    ops.AddConstraintOp.from_constraint(obj.const)
+                )
+                log.info(
+                    "Detected added unique constraint %r on '%s'",
+                    obj.name,
+                    obj.column_names,
+                )
+        else:
+            assert False
+
+    def obj_removed(obj: _constraint_sig):
+        if is_index_sig(obj):
+            if obj.is_unique and not supports_unique_constraints:
+                # many databases double up unique constraints
+                # as unique indexes.  without that list we can't
+                # be sure what we're doing here
+                return
+
+            if autogen_context.run_object_filters(
+                obj.const, obj.name, "index", True, None
+            ):
+                modify_ops.ops.append(ops.DropIndexOp.from_index(obj.const))
+                log.info("Detected removed index %r on %r", obj.name, tname)
+        elif is_uq_sig(obj):
+            if is_create_table or is_drop_table:
+                # if the whole table is being dropped, we don't need to
+                # consider unique constraint separately
+                return
+            if autogen_context.run_object_filters(
+                obj.const, obj.name, "unique_constraint", True, None
+            ):
+                modify_ops.ops.append(
+                    ops.DropConstraintOp.from_constraint(obj.const)
+                )
+                log.info(
+                    "Detected removed unique constraint %r on %r",
+                    obj.name,
+                    tname,
+                )
+        else:
+            assert False
+
+    def obj_changed(
+        old: _constraint_sig,
+        new: _constraint_sig,
+        msg: str,
+    ):
+        if is_index_sig(old):
+            assert is_index_sig(new)
+
+            if autogen_context.run_object_filters(
+                new.const, new.name, "index", False, old.const
+            ):
+                log.info(
+                    "Detected changed index %r on %r: %s", old.name, tname, msg
+                )
+                modify_ops.ops.append(ops.DropIndexOp.from_index(old.const))
+                modify_ops.ops.append(ops.CreateIndexOp.from_index(new.const))
+        elif is_uq_sig(old):
+            assert is_uq_sig(new)
+
+            if autogen_context.run_object_filters(
+                new.const, new.name, "unique_constraint", False, old.const
+            ):
+                log.info(
+                    "Detected changed unique constraint %r on %r: %s",
+                    old.name,
+                    tname,
+                    msg,
+                )
+                modify_ops.ops.append(
+                    ops.DropConstraintOp.from_constraint(old.const)
+                )
+                modify_ops.ops.append(
+                    ops.AddConstraintOp.from_constraint(new.const)
+                )
+        else:
+            assert False
+
+    for removed_name in sorted(set(conn_names).difference(metadata_names)):
+        conn_obj = conn_names[removed_name]
+        if (
+            is_uq_sig(conn_obj)
+            and conn_obj.unnamed in unnamed_metadata_uniques
+        ):
+            continue
+        elif removed_name in doubled_constraints:
+            conn_uq, conn_idx = doubled_constraints[removed_name]
+            if (
+                all(
+                    conn_idx.unnamed != meta_idx.unnamed
+                    for meta_idx in metadata_indexes_sig
+                )
+                and conn_uq.unnamed not in metadata_uniques_by_sig
+            ):
+                obj_removed(conn_uq)
+                obj_removed(conn_idx)
+        else:
+            obj_removed(conn_obj)
+
+    for existing_name in sorted(set(metadata_names).intersection(conn_names)):
+        metadata_obj = metadata_names[existing_name]
+
+        if existing_name in doubled_constraints:
+            conn_uq, conn_idx = doubled_constraints[existing_name]
+            if is_index_sig(metadata_obj):
+                conn_obj = conn_idx
+            else:
+                conn_obj = conn_uq
+        else:
+            conn_obj = conn_names[existing_name]
+
+        if type(conn_obj) != type(metadata_obj):
+            obj_removed(conn_obj)
+            obj_added(metadata_obj)
+        else:
+            comparison = metadata_obj.compare_to_reflected(conn_obj)
+
+            if comparison.is_different:
+                # constraints are different
+                obj_changed(conn_obj, metadata_obj, comparison.message)
+            elif comparison.is_skip:
+                # constraints cannot be compared; skip them
+                thing = (
+                    "index" if is_index_sig(conn_obj) else "unique constraint"
+                )
+                log.info(
+                    "Cannot compare %s %r, assuming equal and skipping. %s",
+                    thing,
+                    conn_obj.name,
+                    comparison.message,
+                )
+            else:
+                # constraints are equal
+                assert comparison.is_equal
+
+    for added_name in sorted(set(metadata_names).difference(conn_names)):
+        obj = metadata_names[added_name]
+        obj_added(obj)
+
+    for uq_sig in unnamed_metadata_uniques:
+        if uq_sig not in conn_uniques_by_sig:
+            obj_added(unnamed_metadata_uniques[uq_sig])
+
+
+def _correct_for_uq_duplicates_uix(
+    conn_unique_constraints,
+    conn_indexes,
+    metadata_unique_constraints,
+    metadata_indexes,
+    dialect,
+    impl,
+):
+    # dedupe unique indexes vs. constraints, since MySQL / Oracle
+    # doesn't really have unique constraints as a separate construct.
+    # but look in the metadata and try to maintain constructs
+    # that already seem to be defined one way or the other
+    # on that side.  This logic was formerly local to MySQL dialect,
+    # generalized to Oracle and others. See #276
+
+    # resolve final rendered name for unique constraints defined in the
+    # metadata.   this includes truncation of long names.  naming convention
+    # names currently should already be set as cons.name, however leave this
+    # to the sqla_compat to decide.
+    metadata_cons_names = [
+        (sqla_compat._get_constraint_final_name(cons, dialect), cons)
+        for cons in metadata_unique_constraints
+    ]
+
+    metadata_uq_names = {
+        name for name, cons in metadata_cons_names if name is not None
+    }
+
+    unnamed_metadata_uqs = {
+        impl._create_metadata_constraint_sig(cons).unnamed
+        for name, cons in metadata_cons_names
+        if name is None
+    }
+
+    metadata_ix_names = {
+        sqla_compat._get_constraint_final_name(cons, dialect)
+        for cons in metadata_indexes
+        if cons.unique
+    }
+
+    # for reflection side, names are in their final database form
+    # already since they're from the database
+    conn_ix_names = {cons.name: cons for cons in conn_indexes if cons.unique}
+
+    uqs_dupe_indexes = {
+        cons.name: cons
+        for cons in conn_unique_constraints
+        if cons.info["duplicates_index"]
+    }
+
+    for overlap in uqs_dupe_indexes:
+        if overlap not in metadata_uq_names:
+            if (
+                impl._create_reflected_constraint_sig(
+                    uqs_dupe_indexes[overlap]
+                ).unnamed
+                not in unnamed_metadata_uqs
+            ):
+                conn_unique_constraints.discard(uqs_dupe_indexes[overlap])
+        elif overlap not in metadata_ix_names:
+            conn_indexes.discard(conn_ix_names[overlap])
+
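+# Illustrative sketch of the duplicate case handled above (assumed MySQL
+# behavior): a table declared with UniqueConstraint("email",
+# name="uq_user_email") reflects back as both a unique constraint (marked
+# info["duplicates_index"]) and a unique index named "uq_user_email"; the
+# function discards whichever reflected construct the metadata does not
+# declare, so autogenerate does not emit a spurious drop/create pair.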
+
+@comparators.dispatch_for("column")
+def _compare_nullable(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: Union[quoted_name, str],
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> None:
+    metadata_col_nullable = metadata_col.nullable
+    conn_col_nullable = conn_col.nullable
+    alter_column_op.existing_nullable = conn_col_nullable
+
+    if conn_col_nullable is not metadata_col_nullable:
+        if (
+            sqla_compat._server_default_is_computed(
+                metadata_col.server_default, conn_col.server_default
+            )
+            and sqla_compat._nullability_might_be_unset(metadata_col)
+            or (
+                sqla_compat._server_default_is_identity(
+                    metadata_col.server_default, conn_col.server_default
+                )
+            )
+        ):
+            log.info(
+                "Ignoring nullable change on identity column '%s.%s'",
+                tname,
+                cname,
+            )
+        else:
+            alter_column_op.modify_nullable = metadata_col_nullable
+            log.info(
+                "Detected %s on column '%s.%s'",
+                "NULL" if metadata_col_nullable else "NOT NULL",
+                tname,
+                cname,
+            )
+
+
+@comparators.dispatch_for("column")
+def _setup_autoincrement(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: quoted_name,
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> None:
+    if metadata_col.table._autoincrement_column is metadata_col:
+        alter_column_op.kw["autoincrement"] = True
+    elif metadata_col.autoincrement is True:
+        alter_column_op.kw["autoincrement"] = True
+    elif metadata_col.autoincrement is False:
+        alter_column_op.kw["autoincrement"] = False
+
+
+@comparators.dispatch_for("column")
+def _compare_type(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: Union[quoted_name, str],
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> None:
+    conn_type = conn_col.type
+    alter_column_op.existing_type = conn_type
+    metadata_type = metadata_col.type
+    if conn_type._type_affinity is sqltypes.NullType:
+        log.info(
+            "Couldn't determine database type " "for column '%s.%s'",
+            tname,
+            cname,
+        )
+        return
+    if metadata_type._type_affinity is sqltypes.NullType:
+        log.info(
+            "Column '%s.%s' has no type within " "the model; can't compare",
+            tname,
+            cname,
+        )
+        return
+
+    isdiff = autogen_context.migration_context._compare_type(
+        conn_col, metadata_col
+    )
+
+    if isdiff:
+        alter_column_op.modify_type = metadata_type
+        log.info(
+            "Detected type change from %r to %r on '%s.%s'",
+            conn_type,
+            metadata_type,
+            tname,
+            cname,
+        )
+
+
+def _render_server_default_for_compare(
+    metadata_default: Optional[Any], autogen_context: AutogenContext
+) -> Optional[str]:
+    if isinstance(metadata_default, sa_schema.DefaultClause):
+        if isinstance(metadata_default.arg, str):
+            metadata_default = metadata_default.arg
+        else:
+            metadata_default = str(
+                metadata_default.arg.compile(
+                    dialect=autogen_context.dialect,
+                    compile_kwargs={"literal_binds": True},
+                )
+            )
+    if isinstance(metadata_default, str):
+        return metadata_default
+    else:
+        return None
+
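+# For example (informal sketch): a metadata server default of
+# DefaultClause("0") compares as the string "0"; one wrapping text("now()")
+# compiles to "now()"; any other value (e.g. None) compares as None.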
+
+def _normalize_computed_default(sqltext: str) -> str:
+    """we want to warn if a computed sql expression has changed.  however
+    we don't want false positives and the warning is not that critical.
+    so filter out most forms of variability from the SQL text.
+
+    """
+
+    return re.sub(r"[ \(\)'\"`\[\]]", "", sqltext).lower()
+
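+# For example, a quick sketch of the normalization above:
+#
+#     _normalize_computed_default('("Price" * 2)')  # -> 'price*2'
+#     _normalize_computed_default("price * 2")      # -> 'price*2'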
+
+def _compare_computed_default(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: str,
+    cname: str,
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> None:
+    rendered_metadata_default = str(
+        cast(sa_schema.Computed, metadata_col.server_default).sqltext.compile(
+            dialect=autogen_context.dialect,
+            compile_kwargs={"literal_binds": True},
+        )
+    )
+
+    # since we cannot alter computed columns, do only a crude comparison
+    # here, eliminating syntactical differences just enough to decide
+    # whether to emit a warning.
+
+    rendered_metadata_default = _normalize_computed_default(
+        rendered_metadata_default
+    )
+
+    if isinstance(conn_col.server_default, sa_schema.Computed):
+        rendered_conn_default = _normalize_computed_default(
+            str(
+                conn_col.server_default.sqltext.compile(
+                    dialect=autogen_context.dialect,
+                    compile_kwargs={"literal_binds": True},
+                )
+            )
+        )
+    else:
+        rendered_conn_default = ""
+
+    if rendered_metadata_default != rendered_conn_default:
+        _warn_computed_not_supported(tname, cname)
+
+
+def _warn_computed_not_supported(tname: str, cname: str) -> None:
+    util.warn("Computed default on %s.%s cannot be modified" % (tname, cname))
+
+
+def _compare_identity_default(
+    autogen_context,
+    alter_column_op,
+    schema,
+    tname,
+    cname,
+    conn_col,
+    metadata_col,
+):
+    impl = autogen_context.migration_context.impl
+    diff, ignored_attr, is_alter = impl._compare_identity_default(
+        metadata_col.server_default, conn_col.server_default
+    )
+
+    return diff, is_alter
+
+
+@comparators.dispatch_for("column")
+def _compare_server_default(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: Union[quoted_name, str],
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> Optional[bool]:
+    metadata_default = metadata_col.server_default
+    conn_col_default = conn_col.server_default
+    if conn_col_default is None and metadata_default is None:
+        return False
+
+    if sqla_compat._server_default_is_computed(metadata_default):
+        # return False in case of a computed column as the server
+        # default. Note that DDL for adding or removing "GENERATED AS" from
+        # an existing column is not currently known for any backend.
+        # Once SQLAlchemy can reflect "GENERATED" as the "computed" element,
+        # we would also want to ignore and/or warn for changes vs. the
+        # metadata (or support backend specific DDL if applicable).
+        if not sqla_compat.has_computed_reflection:
+            return False
+
+        else:
+            return (
+                _compare_computed_default(  # type:ignore[func-returns-value]
+                    autogen_context,
+                    alter_column_op,
+                    schema,
+                    tname,
+                    cname,
+                    conn_col,
+                    metadata_col,
+                )
+            )
+    if sqla_compat._server_default_is_computed(conn_col_default):
+        _warn_computed_not_supported(tname, cname)
+        return False
+
+    if sqla_compat._server_default_is_identity(
+        metadata_default, conn_col_default
+    ):
+        alter_column_op.existing_server_default = conn_col_default
+        diff, is_alter = _compare_identity_default(
+            autogen_context,
+            alter_column_op,
+            schema,
+            tname,
+            cname,
+            conn_col,
+            metadata_col,
+        )
+        if is_alter:
+            alter_column_op.modify_server_default = metadata_default
+            if diff:
+                log.info(
+                    "Detected server default on column '%s.%s': "
+                    "identity options attributes %s",
+                    tname,
+                    cname,
+                    sorted(diff),
+                )
+    else:
+        rendered_metadata_default = _render_server_default_for_compare(
+            metadata_default, autogen_context
+        )
+
+        rendered_conn_default = (
+            cast(Any, conn_col_default).arg.text if conn_col_default else None
+        )
+
+        alter_column_op.existing_server_default = conn_col_default
+
+        is_diff = autogen_context.migration_context._compare_server_default(
+            conn_col,
+            metadata_col,
+            rendered_metadata_default,
+            rendered_conn_default,
+        )
+        if is_diff:
+            alter_column_op.modify_server_default = metadata_default
+            log.info("Detected server default on column '%s.%s'", tname, cname)
+
+    return None
+
+
+@comparators.dispatch_for("column")
+def _compare_column_comment(
+    autogen_context: AutogenContext,
+    alter_column_op: AlterColumnOp,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    cname: quoted_name,
+    conn_col: Column[Any],
+    metadata_col: Column[Any],
+) -> Optional[Literal[False]]:
+    assert autogen_context.dialect is not None
+    if not autogen_context.dialect.supports_comments:
+        return None
+
+    metadata_comment = metadata_col.comment
+    conn_col_comment = conn_col.comment
+    if conn_col_comment is None and metadata_comment is None:
+        return False
+
+    alter_column_op.existing_comment = conn_col_comment
+
+    if conn_col_comment != metadata_comment:
+        alter_column_op.modify_comment = metadata_comment
+        log.info("Detected column comment '%s.%s'", tname, cname)
+
+    return None
+
+
+@comparators.dispatch_for("table")
+def _compare_foreign_keys(
+    autogen_context: AutogenContext,
+    modify_table_ops: ModifyTableOps,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    conn_table: Table,
+    metadata_table: Table,
+) -> None:
+    # if we're doing CREATE TABLE, all FKs are created
+    # inline within the table def
+    if conn_table is None or metadata_table is None:
+        return
+
+    inspector = autogen_context.inspector
+    metadata_fks = {
+        fk
+        for fk in metadata_table.constraints
+        if isinstance(fk, sa_schema.ForeignKeyConstraint)
+    }
+
+    conn_fks_list = [
+        fk
+        for fk in inspector.get_foreign_keys(tname, schema=schema)
+        if autogen_context.run_name_filters(
+            fk["name"],
+            "foreign_key_constraint",
+            {"table_name": tname, "schema_name": schema},
+        )
+    ]
+
+    conn_fks = {
+        _make_foreign_key(const, conn_table)  # type: ignore[arg-type]
+        for const in conn_fks_list
+    }
+
+    impl = autogen_context.migration_context.impl
+
+    # give the dialect a chance to correct the FKs to match more
+    # closely
+    autogen_context.migration_context.impl.correct_for_autogen_foreignkeys(
+        conn_fks, metadata_fks
+    )
+
+    metadata_fks_sig = {
+        impl._create_metadata_constraint_sig(fk) for fk in metadata_fks
+    }
+
+    conn_fks_sig = {
+        impl._create_reflected_constraint_sig(fk) for fk in conn_fks
+    }
+
+    # check if reflected FKs include options, indicating the backend
+    # can reflect FK options
+    if conn_fks_list and "options" in conn_fks_list[0]:
+        conn_fks_by_sig = {c.unnamed: c for c in conn_fks_sig}
+        metadata_fks_by_sig = {c.unnamed: c for c in metadata_fks_sig}
+    else:
+        # otherwise compare by sig without options added
+        conn_fks_by_sig = {c.unnamed_no_options: c for c in conn_fks_sig}
+        metadata_fks_by_sig = {
+            c.unnamed_no_options: c for c in metadata_fks_sig
+        }
+
+    metadata_fks_by_name = {
+        c.name: c for c in metadata_fks_sig if c.name is not None
+    }
+    conn_fks_by_name = {c.name: c for c in conn_fks_sig if c.name is not None}
+
+    def _add_fk(obj, compare_to):
+        if autogen_context.run_object_filters(
+            obj.const, obj.name, "foreign_key_constraint", False, compare_to
+        ):
+            modify_table_ops.ops.append(
+                ops.CreateForeignKeyOp.from_constraint(obj.const)
+            )
+
+            log.info(
+                "Detected added foreign key (%s)(%s) on table %s%s",
+                ", ".join(obj.source_columns),
+                ", ".join(obj.target_columns),
+                "%s." % obj.source_schema if obj.source_schema else "",
+                obj.source_table,
+            )
+
+    def _remove_fk(obj, compare_to):
+        if autogen_context.run_object_filters(
+            obj.const, obj.name, "foreign_key_constraint", True, compare_to
+        ):
+            modify_table_ops.ops.append(
+                ops.DropConstraintOp.from_constraint(obj.const)
+            )
+            log.info(
+                "Detected removed foreign key (%s)(%s) on table %s%s",
+                ", ".join(obj.source_columns),
+                ", ".join(obj.target_columns),
+                "%s." % obj.source_schema if obj.source_schema else "",
+                obj.source_table,
+            )
+
+    # so far it appears we don't need to do this by name at all.
+    # SQLite doesn't preserve constraint names anyway
+
+    for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig):
+        const = conn_fks_by_sig[removed_sig]
+        compare_to = (
+            metadata_fks_by_name[const.name].const
+            if const.name in metadata_fks_by_name
+            else None
+        )
+        _remove_fk(const, compare_to)
+
+    for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig):
+        const = metadata_fks_by_sig[added_sig]
+        compare_to = (
+            conn_fks_by_name[const.name].const
+            if const.name in conn_fks_by_name
+            else None
+        )
+        _add_fk(const, compare_to)
+
+
+@comparators.dispatch_for("table")
+def _compare_table_comment(
+    autogen_context: AutogenContext,
+    modify_table_ops: ModifyTableOps,
+    schema: Optional[str],
+    tname: Union[quoted_name, str],
+    conn_table: Optional[Table],
+    metadata_table: Optional[Table],
+) -> None:
+    assert autogen_context.dialect is not None
+    if not autogen_context.dialect.supports_comments:
+        return
+
+    # if we're doing CREATE TABLE, comments will be created inline
+    # with the create_table op.
+    if conn_table is None or metadata_table is None:
+        return
+
+    if conn_table.comment is None and metadata_table.comment is None:
+        return
+
+    if metadata_table.comment is None and conn_table.comment is not None:
+        modify_table_ops.ops.append(
+            ops.DropTableCommentOp(
+                tname, existing_comment=conn_table.comment, schema=schema
+            )
+        )
+    elif metadata_table.comment != conn_table.comment:
+        modify_table_ops.ops.append(
+            ops.CreateTableCommentOp(
+                tname,
+                metadata_table.comment,
+                existing_comment=conn_table.comment,
+                schema=schema,
+            )
+        )
diff --git a/venv/Lib/site-packages/alembic/autogenerate/render.py b/venv/Lib/site-packages/alembic/autogenerate/render.py
new file mode 100644
index 0000000000000000000000000000000000000000..317a6dbed9cf6eb6514d67a82ee3ee853c22254b
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/autogenerate/render.py
@@ -0,0 +1,1097 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from io import StringIO
+import re
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from mako.pygen import PythonPrinter
+from sqlalchemy import schema as sa_schema
+from sqlalchemy import sql
+from sqlalchemy import types as sqltypes
+from sqlalchemy.sql.elements import conv
+from sqlalchemy.sql.elements import quoted_name
+
+from .. import util
+from ..operations import ops
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.sql.base import DialectKWArgs
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.schema import CheckConstraint
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import FetchedValue
+    from sqlalchemy.sql.schema import ForeignKey
+    from sqlalchemy.sql.schema import ForeignKeyConstraint
+    from sqlalchemy.sql.schema import Index
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import PrimaryKeyConstraint
+    from sqlalchemy.sql.schema import UniqueConstraint
+    from sqlalchemy.sql.sqltypes import ARRAY
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from alembic.autogenerate.api import AutogenContext
+    from alembic.config import Config
+    from alembic.operations.ops import MigrationScript
+    from alembic.operations.ops import ModifyTableOps
+    from alembic.util.sqla_compat import Computed
+    from alembic.util.sqla_compat import Identity
+
+
+MAX_PYTHON_ARGS = 255
+
+
+def _render_gen_name(
+    autogen_context: AutogenContext,
+    name: sqla_compat._ConstraintName,
+) -> Optional[Union[quoted_name, str, _f_name]]:
+    if isinstance(name, conv):
+        return _f_name(_alembic_autogenerate_prefix(autogen_context), name)
+    else:
+        return sqla_compat.constraint_name_or_none(name)
+
+
+def _indent(text: str) -> str:
+    text = re.compile(r"^", re.M).sub("    ", text).strip()
+    text = re.compile(r" +$", re.M).sub("", text)
+    return text
+
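+# e.g. (informal sketch): _indent("op.drop_table('x')\nop.drop_table('y')")
+# returns "op.drop_table('x')\n    op.drop_table('y')"; every line gains
+# four spaces of indent and the surrounding whitespace of the block is then
+# stripped, which suits insertion into the upgrade()/downgrade() bodies.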
+
+def _render_python_into_templatevars(
+    autogen_context: AutogenContext,
+    migration_script: MigrationScript,
+    template_args: Dict[str, Union[str, Config]],
+) -> None:
+    imports = autogen_context.imports
+
+    for upgrade_ops, downgrade_ops in zip(
+        migration_script.upgrade_ops_list, migration_script.downgrade_ops_list
+    ):
+        template_args[upgrade_ops.upgrade_token] = _indent(
+            _render_cmd_body(upgrade_ops, autogen_context)
+        )
+        template_args[downgrade_ops.downgrade_token] = _indent(
+            _render_cmd_body(downgrade_ops, autogen_context)
+        )
+    template_args["imports"] = "\n".join(sorted(imports))
+
+
+default_renderers = renderers = util.Dispatcher()
+
+
+def _render_cmd_body(
+    op_container: ops.OpContainer,
+    autogen_context: AutogenContext,
+) -> str:
+    buf = StringIO()
+    printer = PythonPrinter(buf)
+
+    printer.writeline(
+        "# ### commands auto generated by Alembic - please adjust! ###"
+    )
+
+    has_lines = False
+    for op in op_container.ops:
+        lines = render_op(autogen_context, op)
+        has_lines = has_lines or bool(lines)
+
+        for line in lines:
+            printer.writeline(line)
+
+    if not has_lines:
+        printer.writeline("pass")
+
+    printer.writeline("# ### end Alembic commands ###")
+
+    return buf.getvalue()
+
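+# The rendered body therefore looks roughly like (sketch):
+#
+#     # ### commands auto generated by Alembic - please adjust! ###
+#     op.drop_table('old_table')
+#     # ### end Alembic commands ###
+#
+# with a bare "pass" between the markers when no operations were detected.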
+
+def render_op(
+    autogen_context: AutogenContext, op: ops.MigrateOperation
+) -> List[str]:
+    renderer = renderers.dispatch(op)
+    lines = util.to_list(renderer(autogen_context, op))
+    return lines
+
+
+def render_op_text(
+    autogen_context: AutogenContext, op: ops.MigrateOperation
+) -> str:
+    return "\n".join(render_op(autogen_context, op))
+
+
+@renderers.dispatch_for(ops.ModifyTableOps)
+def _render_modify_table(
+    autogen_context: AutogenContext, op: ModifyTableOps
+) -> List[str]:
+    opts = autogen_context.opts
+    render_as_batch = opts.get("render_as_batch", False)
+
+    if op.ops:
+        lines = []
+        if render_as_batch:
+            with autogen_context._within_batch():
+                lines.append(
+                    "with op.batch_alter_table(%r, schema=%r) as batch_op:"
+                    % (op.table_name, op.schema)
+                )
+                for t_op in op.ops:
+                    t_lines = render_op(autogen_context, t_op)
+                    lines.extend(t_lines)
+                lines.append("")
+        else:
+            for t_op in op.ops:
+                t_lines = render_op(autogen_context, t_op)
+                lines.extend(t_lines)
+
+        return lines
+    else:
+        return []
+
+
+@renderers.dispatch_for(ops.CreateTableCommentOp)
+def _render_create_table_comment(
+    autogen_context: AutogenContext, op: ops.CreateTableCommentOp
+) -> str:
+    if autogen_context._has_batch:
+        templ = (
+            "{prefix}create_table_comment(\n"
+            "{indent}{comment},\n"
+            "{indent}existing_comment={existing}\n"
+            ")"
+        )
+    else:
+        templ = (
+            "{prefix}create_table_comment(\n"
+            "{indent}'{tname}',\n"
+            "{indent}{comment},\n"
+            "{indent}existing_comment={existing},\n"
+            "{indent}schema={schema}\n"
+            ")"
+        )
+    return templ.format(
+        prefix=_alembic_autogenerate_prefix(autogen_context),
+        tname=op.table_name,
+        comment="%r" % op.comment if op.comment is not None else None,
+        existing="%r" % op.existing_comment
+        if op.existing_comment is not None
+        else None,
+        schema="'%s'" % op.schema if op.schema is not None else None,
+        indent="    ",
+    )
+
+
+@renderers.dispatch_for(ops.DropTableCommentOp)
+def _render_drop_table_comment(
+    autogen_context: AutogenContext, op: ops.DropTableCommentOp
+) -> str:
+    if autogen_context._has_batch:
+        templ = (
+            "{prefix}drop_table_comment(\n"
+            "{indent}existing_comment={existing}\n"
+            ")"
+        )
+    else:
+        templ = (
+            "{prefix}drop_table_comment(\n"
+            "{indent}'{tname}',\n"
+            "{indent}existing_comment={existing},\n"
+            "{indent}schema={schema}\n"
+            ")"
+        )
+    return templ.format(
+        prefix=_alembic_autogenerate_prefix(autogen_context),
+        tname=op.table_name,
+        existing="%r" % op.existing_comment
+        if op.existing_comment is not None
+        else None,
+        schema="'%s'" % op.schema if op.schema is not None else None,
+        indent="    ",
+    )
+
+
+@renderers.dispatch_for(ops.CreateTableOp)
+def _add_table(autogen_context: AutogenContext, op: ops.CreateTableOp) -> str:
+    table = op.to_table()
+
+    args = [
+        col
+        for col in [
+            _render_column(col, autogen_context) for col in table.columns
+        ]
+        if col
+    ] + sorted(
+        [
+            rcons
+            for rcons in [
+                _render_constraint(
+                    cons, autogen_context, op._namespace_metadata
+                )
+                for cons in table.constraints
+            ]
+            if rcons is not None
+        ]
+    )
+
+    if len(args) > MAX_PYTHON_ARGS:
+        args_str = "*[" + ",\n".join(args) + "]"
+    else:
+        args_str = ",\n".join(args)
+
+    text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % {
+        "tablename": _ident(op.table_name),
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "args": args_str,
+    }
+    if op.schema:
+        text += ",\nschema=%r" % _ident(op.schema)
+
+    comment = table.comment
+    if comment:
+        text += ",\ncomment=%r" % _ident(comment)
+
+    info = table.info
+    if info:
+        text += f",\ninfo={info!r}"
+
+    for k in sorted(op.kw):
+        text += ",\n%s=%r" % (k.replace(" ", "_"), op.kw[k])
+
+    if table._prefixes:
+        prefixes = ", ".join("'%s'" % p for p in table._prefixes)
+        text += ",\nprefixes=[%s]" % prefixes
+
+    text += "\n)"
+    return text
+
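+# Note on the MAX_PYTHON_ARGS branch above: older CPython versions capped
+# explicitly written call arguments at 255, so very wide tables are rendered
+# as create_table('t', *[...]) to unpack the column and constraint arguments
+# from a single list instead.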
+
+@renderers.dispatch_for(ops.DropTableOp)
+def _drop_table(autogen_context: AutogenContext, op: ops.DropTableOp) -> str:
+    text = "%(prefix)sdrop_table(%(tname)r" % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "tname": _ident(op.table_name),
+    }
+    if op.schema:
+        text += ", schema=%r" % _ident(op.schema)
+    text += ")"
+    return text
+
+
+def _render_dialect_kwargs_items(
+    autogen_context: AutogenContext, item: DialectKWArgs
+) -> list[str]:
+    return [
+        f"{key}={_render_potential_expr(val, autogen_context)}"
+        for key, val in item.dialect_kwargs.items()
+    ]
+
+
+@renderers.dispatch_for(ops.CreateIndexOp)
+def _add_index(autogen_context: AutogenContext, op: ops.CreateIndexOp) -> str:
+    index = op.to_index()
+
+    has_batch = autogen_context._has_batch
+
+    if has_batch:
+        tmpl = (
+            "%(prefix)screate_index(%(name)r, [%(columns)s], "
+            "unique=%(unique)r%(kwargs)s)"
+        )
+    else:
+        tmpl = (
+            "%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], "
+            "unique=%(unique)r%(schema)s%(kwargs)s)"
+        )
+
+    assert index.table is not None
+
+    opts = _render_dialect_kwargs_items(autogen_context, index)
+    text = tmpl % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "name": _render_gen_name(autogen_context, index.name),
+        "table": _ident(index.table.name),
+        "columns": ", ".join(
+            _get_index_rendered_expressions(index, autogen_context)
+        ),
+        "unique": index.unique or False,
+        "schema": (", schema=%r" % _ident(index.table.schema))
+        if index.table.schema
+        else "",
+        "kwargs": ", " + ", ".join(opts) if opts else "",
+    }
+    return text
+
+
+@renderers.dispatch_for(ops.DropIndexOp)
+def _drop_index(autogen_context: AutogenContext, op: ops.DropIndexOp) -> str:
+    index = op.to_index()
+
+    has_batch = autogen_context._has_batch
+
+    if has_batch:
+        tmpl = "%(prefix)sdrop_index(%(name)r%(kwargs)s)"
+    else:
+        tmpl = (
+            "%(prefix)sdrop_index(%(name)r, "
+            "table_name=%(table_name)r%(schema)s%(kwargs)s)"
+        )
+    opts = _render_dialect_kwargs_items(autogen_context, index)
+    text = tmpl % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "name": _render_gen_name(autogen_context, op.index_name),
+        "table_name": _ident(op.table_name),
+        "schema": ((", schema=%r" % _ident(op.schema)) if op.schema else ""),
+        "kwargs": ", " + ", ".join(opts) if opts else "",
+    }
+    return text
+
+
+@renderers.dispatch_for(ops.CreateUniqueConstraintOp)
+def _add_unique_constraint(
+    autogen_context: AutogenContext, op: ops.CreateUniqueConstraintOp
+) -> List[str]:
+    return [_uq_constraint(op.to_constraint(), autogen_context, True)]
+
+
+@renderers.dispatch_for(ops.CreateForeignKeyOp)
+def _add_fk_constraint(
+    autogen_context: AutogenContext, op: ops.CreateForeignKeyOp
+) -> str:
+    args = [repr(_render_gen_name(autogen_context, op.constraint_name))]
+    if not autogen_context._has_batch:
+        args.append(repr(_ident(op.source_table)))
+
+    args.extend(
+        [
+            repr(_ident(op.referent_table)),
+            repr([_ident(col) for col in op.local_cols]),
+            repr([_ident(col) for col in op.remote_cols]),
+        ]
+    )
+    kwargs = [
+        "referent_schema",
+        "onupdate",
+        "ondelete",
+        "initially",
+        "deferrable",
+        "use_alter",
+        "match",
+    ]
+    if not autogen_context._has_batch:
+        kwargs.insert(0, "source_schema")
+
+    for k in kwargs:
+        if k in op.kw:
+            value = op.kw[k]
+            if value is not None:
+                args.append("%s=%r" % (k, value))
+
+    return "%(prefix)screate_foreign_key(%(args)s)" % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "args": ", ".join(args),
+    }
+
+
+@renderers.dispatch_for(ops.CreatePrimaryKeyOp)
+def _add_pk_constraint(constraint, autogen_context):
+    raise NotImplementedError()
+
+
+@renderers.dispatch_for(ops.CreateCheckConstraintOp)
+def _add_check_constraint(constraint, autogen_context):
+    raise NotImplementedError()
+
+
+@renderers.dispatch_for(ops.DropConstraintOp)
+def _drop_constraint(
+    autogen_context: AutogenContext, op: ops.DropConstraintOp
+) -> str:
+    prefix = _alembic_autogenerate_prefix(autogen_context)
+    name = _render_gen_name(autogen_context, op.constraint_name)
+    schema = _ident(op.schema) if op.schema else None
+    type_ = _ident(op.constraint_type) if op.constraint_type else None
+
+    params_strs = []
+    params_strs.append(repr(name))
+    if not autogen_context._has_batch:
+        params_strs.append(repr(_ident(op.table_name)))
+        if schema is not None:
+            params_strs.append(f"schema={schema!r}")
+    if type_ is not None:
+        params_strs.append(f"type_={type_!r}")
+
+    return f"{prefix}drop_constraint({', '.join(params_strs)})"
+
+
+@renderers.dispatch_for(ops.AddColumnOp)
+def _add_column(autogen_context: AutogenContext, op: ops.AddColumnOp) -> str:
+    schema, tname, column = op.schema, op.table_name, op.column
+    if autogen_context._has_batch:
+        template = "%(prefix)sadd_column(%(column)s)"
+    else:
+        template = "%(prefix)sadd_column(%(tname)r, %(column)s"
+        if schema:
+            template += ", schema=%(schema)r"
+        template += ")"
+    text = template % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "tname": tname,
+        "column": _render_column(column, autogen_context),
+        "schema": schema,
+    }
+    return text
+
+
+@renderers.dispatch_for(ops.DropColumnOp)
+def _drop_column(autogen_context: AutogenContext, op: ops.DropColumnOp) -> str:
+    schema, tname, column_name = op.schema, op.table_name, op.column_name
+
+    if autogen_context._has_batch:
+        template = "%(prefix)sdrop_column(%(cname)r)"
+    else:
+        template = "%(prefix)sdrop_column(%(tname)r, %(cname)r"
+        if schema:
+            template += ", schema=%(schema)r"
+        template += ")"
+
+    text = template % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "tname": _ident(tname),
+        "cname": _ident(column_name),
+        "schema": _ident(schema),
+    }
+    return text
+
+
+@renderers.dispatch_for(ops.AlterColumnOp)
+def _alter_column(
+    autogen_context: AutogenContext, op: ops.AlterColumnOp
+) -> str:
+    tname = op.table_name
+    cname = op.column_name
+    server_default = op.modify_server_default
+    type_ = op.modify_type
+    nullable = op.modify_nullable
+    comment = op.modify_comment
+    autoincrement = op.kw.get("autoincrement", None)
+    existing_type = op.existing_type
+    existing_nullable = op.existing_nullable
+    existing_comment = op.existing_comment
+    existing_server_default = op.existing_server_default
+    schema = op.schema
+
+    indent = " " * 11
+
+    if autogen_context._has_batch:
+        template = "%(prefix)salter_column(%(cname)r"
+    else:
+        template = "%(prefix)salter_column(%(tname)r, %(cname)r"
+
+    text = template % {
+        "prefix": _alembic_autogenerate_prefix(autogen_context),
+        "tname": tname,
+        "cname": cname,
+    }
+    if existing_type is not None:
+        text += ",\n%sexisting_type=%s" % (
+            indent,
+            _repr_type(existing_type, autogen_context),
+        )
+    if server_default is not False:
+        rendered = _render_server_default(server_default, autogen_context)
+        text += ",\n%sserver_default=%s" % (indent, rendered)
+
+    if type_ is not None:
+        text += ",\n%stype_=%s" % (indent, _repr_type(type_, autogen_context))
+    if nullable is not None:
+        text += ",\n%snullable=%r" % (indent, nullable)
+    if comment is not False:
+        text += ",\n%scomment=%r" % (indent, comment)
+    if existing_comment is not None:
+        text += ",\n%sexisting_comment=%r" % (indent, existing_comment)
+    if nullable is None and existing_nullable is not None:
+        text += ",\n%sexisting_nullable=%r" % (indent, existing_nullable)
+    if autoincrement is not None:
+        text += ",\n%sautoincrement=%r" % (indent, autoincrement)
+    if server_default is False and existing_server_default:
+        rendered = _render_server_default(
+            existing_server_default, autogen_context
+        )
+        text += ",\n%sexisting_server_default=%s" % (indent, rendered)
+    if schema and not autogen_context._has_batch:
+        text += ",\n%sschema=%r" % (indent, schema)
+    text += ")"
+    return text
+
+
+class _f_name:
+    def __init__(self, prefix: str, name: conv) -> None:
+        self.prefix = prefix
+        self.name = name
+
+    def __repr__(self) -> str:
+        return "%sf(%r)" % (self.prefix, _ident(self.name))
+
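+# e.g. (sketch): with prefix "op." and a naming-convention name,
+# repr(_f_name("op.", conv("uq_user_email"))) renders as
+# "op.f('uq_user_email')" in the generated migration code.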
+
+def _ident(name: Optional[Union[quoted_name, str]]) -> Optional[str]:
+    """produce a __repr__() object for a string identifier that may
+    use quoted_name() in SQLAlchemy 0.9 and greater.
+
+    The issue worked around here is that quoted_name() doesn't have
+    very good repr() behavior by itself when unicode is involved.
+
+    """
+    if name is None:
+        return name
+    elif isinstance(name, quoted_name):
+        return str(name)
+    elif isinstance(name, str):
+        return name
+
+
+def _render_potential_expr(
+    value: Any,
+    autogen_context: AutogenContext,
+    *,
+    wrap_in_text: bool = True,
+    is_server_default: bool = False,
+    is_index: bool = False,
+) -> str:
+    if isinstance(value, sql.ClauseElement):
+        if wrap_in_text:
+            template = "%(prefix)stext(%(sql)r)"
+        else:
+            template = "%(sql)r"
+
+        return template % {
+            "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+            "sql": autogen_context.migration_context.impl.render_ddl_sql_expr(
+                value, is_server_default=is_server_default, is_index=is_index
+            ),
+        }
+
+    else:
+        return repr(value)
+
+
+def _get_index_rendered_expressions(
+    idx: Index, autogen_context: AutogenContext
+) -> List[str]:
+    return [
+        repr(_ident(getattr(exp, "name", None)))
+        if isinstance(exp, sa_schema.Column)
+        else _render_potential_expr(exp, autogen_context, is_index=True)
+        for exp in idx.expressions
+    ]
+
+
+def _uq_constraint(
+    constraint: UniqueConstraint,
+    autogen_context: AutogenContext,
+    alter: bool,
+) -> str:
+    opts: List[Tuple[str, Any]] = []
+
+    has_batch = autogen_context._has_batch
+
+    if constraint.deferrable:
+        opts.append(("deferrable", str(constraint.deferrable)))
+    if constraint.initially:
+        opts.append(("initially", str(constraint.initially)))
+    if not has_batch and alter and constraint.table.schema:
+        opts.append(("schema", _ident(constraint.table.schema)))
+    if not alter and constraint.name:
+        opts.append(
+            ("name", _render_gen_name(autogen_context, constraint.name))
+        )
+    dialect_options = _render_dialect_kwargs_items(autogen_context, constraint)
+
+    if alter:
+        args = [repr(_render_gen_name(autogen_context, constraint.name))]
+        if not has_batch:
+            args += [repr(_ident(constraint.table.name))]
+        args.append(repr([_ident(col.name) for col in constraint.columns]))
+        args.extend(["%s=%r" % (k, v) for k, v in opts])
+        args.extend(dialect_options)
+        return "%(prefix)screate_unique_constraint(%(args)s)" % {
+            "prefix": _alembic_autogenerate_prefix(autogen_context),
+            "args": ", ".join(args),
+        }
+    else:
+        args = [repr(_ident(col.name)) for col in constraint.columns]
+        args.extend(["%s=%r" % (k, v) for k, v in opts])
+        args.extend(dialect_options)
+        return "%(prefix)sUniqueConstraint(%(args)s)" % {
+            "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+            "args": ", ".join(args),
+        }
+
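+# e.g. (sketch, assuming the conventional "op."/"sa." prefixes): with
+# alter=True this renders
+#
+#     op.create_unique_constraint('uq_user_email', 'user', ['email'])
+#
+# while alter=False renders the inline form
+#
+#     sa.UniqueConstraint('email', name='uq_user_email')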
+
+def _user_autogenerate_prefix(autogen_context, target):
+    prefix = autogen_context.opts["user_module_prefix"]
+    if prefix is None:
+        return "%s." % target.__module__
+    else:
+        return prefix
+
+
+def _sqlalchemy_autogenerate_prefix(autogen_context: AutogenContext) -> str:
+    return autogen_context.opts["sqlalchemy_module_prefix"] or ""
+
+
+def _alembic_autogenerate_prefix(autogen_context: AutogenContext) -> str:
+    if autogen_context._has_batch:
+        return "batch_op."
+    else:
+        return autogen_context.opts["alembic_module_prefix"] or ""
+
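+# e.g. inside a batch_alter_table block the prefix is "batch_op.";
+# otherwise it is the configured alembic_module_prefix (conventionally
+# "op.").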
+
+def _user_defined_render(
+    type_: str, object_: Any, autogen_context: AutogenContext
+) -> Union[str, Literal[False]]:
+    if "render_item" in autogen_context.opts:
+        render = autogen_context.opts["render_item"]
+        if render:
+            rendered = render(type_, object_, autogen_context)
+            if rendered is not False:
+                return rendered
+    return False
+
+
+def _render_column(
+    column: Column[Any], autogen_context: AutogenContext
+) -> str:
+    rendered = _user_defined_render("column", column, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    args: List[str] = []
+    opts: List[Tuple[str, Any]] = []
+
+    if column.server_default:
+        rendered = _render_server_default(  # type:ignore[assignment]
+            column.server_default, autogen_context
+        )
+        if rendered:
+            if _should_render_server_default_positionally(
+                column.server_default
+            ):
+                args.append(rendered)
+            else:
+                opts.append(("server_default", rendered))
+
+    if (
+        column.autoincrement is not None
+        and column.autoincrement != sqla_compat.AUTOINCREMENT_DEFAULT
+    ):
+        opts.append(("autoincrement", column.autoincrement))
+
+    if column.nullable is not None:
+        opts.append(("nullable", column.nullable))
+
+    if column.system:
+        opts.append(("system", column.system))
+
+    comment = column.comment
+    if comment:
+        opts.append(("comment", "%r" % comment))
+
+    # TODO: for non-ascii colname, assign a "key"
+    return "%(prefix)sColumn(%(name)r, %(type)s, %(args)s%(kwargs)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "name": _ident(column.name),
+        "type": _repr_type(column.type, autogen_context),
+        "args": ", ".join([str(arg) for arg in args]) + ", " if args else "",
+        "kwargs": (
+            ", ".join(
+                ["%s=%s" % (kwname, val) for kwname, val in opts]
+                + [
+                    "%s=%s"
+                    % (key, _render_potential_expr(val, autogen_context))
+                    for key, val in sqla_compat._column_kwargs(column).items()
+                ]
+            )
+        ),
+    }
+
+
+def _should_render_server_default_positionally(server_default: Any) -> bool:
+    return sqla_compat._server_default_is_computed(
+        server_default
+    ) or sqla_compat._server_default_is_identity(server_default)
+
+
+def _render_server_default(
+    default: Optional[
+        Union[FetchedValue, str, TextClause, ColumnElement[Any]]
+    ],
+    autogen_context: AutogenContext,
+    repr_: bool = True,
+) -> Optional[str]:
+    rendered = _user_defined_render("server_default", default, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    if sqla_compat._server_default_is_computed(default):
+        return _render_computed(cast("Computed", default), autogen_context)
+    elif sqla_compat._server_default_is_identity(default):
+        return _render_identity(cast("Identity", default), autogen_context)
+    elif isinstance(default, sa_schema.DefaultClause):
+        if isinstance(default.arg, str):
+            default = default.arg
+        else:
+            return _render_potential_expr(
+                default.arg, autogen_context, is_server_default=True
+            )
+
+    if isinstance(default, str) and repr_:
+        default = repr(re.sub(r"^'|'$", "", default))
+
+    return cast(str, default)
+
+
+def _render_computed(
+    computed: Computed, autogen_context: AutogenContext
+) -> str:
+    text = _render_potential_expr(
+        computed.sqltext, autogen_context, wrap_in_text=False
+    )
+
+    kwargs = {}
+    if computed.persisted is not None:
+        kwargs["persisted"] = computed.persisted
+    return "%(prefix)sComputed(%(text)s, %(kwargs)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "text": text,
+        "kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())),
+    }
+
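+# e.g. (sketch, assuming the conventional "sa." prefix): a column defined
+# with Computed("price * 2", persisted=True) renders as
+# sa.Computed('price * 2', persisted=True) in the generated migration.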
+
+def _render_identity(
+    identity: Identity, autogen_context: AutogenContext
+) -> str:
+    kwargs = sqla_compat._get_identity_options_dict(
+        identity, dialect_kwargs=True
+    )
+
+    return "%(prefix)sIdentity(%(kwargs)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())),
+    }
+
+
+def _repr_type(
+    type_: TypeEngine,
+    autogen_context: AutogenContext,
+    _skip_variants: bool = False,
+) -> str:
+    rendered = _user_defined_render("type", type_, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    if hasattr(autogen_context.migration_context, "impl"):
+        impl_rt = autogen_context.migration_context.impl.render_type(
+            type_, autogen_context
+        )
+    else:
+        impl_rt = None
+
+    mod = type(type_).__module__
+    imports = autogen_context.imports
+    if mod.startswith("sqlalchemy.dialects"):
+        match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod)
+        assert match is not None
+        dname = match.group(1)
+        if imports is not None:
+            imports.add("from sqlalchemy.dialects import %s" % dname)
+        if impl_rt:
+            return impl_rt
+        else:
+            return "%s.%r" % (dname, type_)
+    elif impl_rt:
+        return impl_rt
+    elif not _skip_variants and sqla_compat._type_has_variants(type_):
+        return _render_Variant_type(type_, autogen_context)
+    elif mod.startswith("sqlalchemy."):
+        if "_render_%s_type" % type_.__visit_name__ in globals():
+            fn = globals()["_render_%s_type" % type_.__visit_name__]
+            return fn(type_, autogen_context)
+        else:
+            prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
+            return "%s%r" % (prefix, type_)
+    else:
+        prefix = _user_autogenerate_prefix(autogen_context, type_)
+        return "%s%r" % (prefix, type_)
+
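+# e.g. (sketch, assuming the dialect impl does not override render_type):
+# sa.String(50) renders as "sa.String(length=50)", while postgresql.UUID()
+# renders as "postgresql.UUID()" and adds
+# "from sqlalchemy.dialects import postgresql" to the migration's imports.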
+
+def _render_ARRAY_type(type_: ARRAY, autogen_context: AutogenContext) -> str:
+    return cast(
+        str,
+        _render_type_w_subtype(
+            type_, autogen_context, "item_type", r"(.+?\()"
+        ),
+    )
+
+
+def _render_Variant_type(
+    type_: TypeEngine, autogen_context: AutogenContext
+) -> str:
+    base_type, variant_mapping = sqla_compat._get_variant_mapping(type_)
+    base = _repr_type(base_type, autogen_context, _skip_variants=True)
+    assert base is not None and base is not False  # type: ignore[comparison-overlap]  # noqa:E501
+    for dialect in sorted(variant_mapping):
+        typ = variant_mapping[dialect]
+        base += ".with_variant(%s, %r)" % (
+            _repr_type(typ, autogen_context, _skip_variants=True),
+            dialect,
+        )
+    return base
+
+
+def _render_type_w_subtype(
+    type_: TypeEngine,
+    autogen_context: AutogenContext,
+    attrname: str,
+    regexp: str,
+    prefix: Optional[str] = None,
+) -> Union[Optional[str], Literal[False]]:
+    outer_repr = repr(type_)
+    inner_type = getattr(type_, attrname, None)
+    if inner_type is None:
+        return False
+
+    inner_repr = repr(inner_type)
+
+    inner_repr = re.sub(r"([\(\)])", r"\\\1", inner_repr)
+    sub_type = _repr_type(getattr(type_, attrname), autogen_context)
+    outer_type = re.sub(regexp + inner_repr, r"\1%s" % sub_type, outer_repr)
+
+    if prefix:
+        return "%s%s" % (prefix, outer_type)
+
+    mod = type(type_).__module__
+    if mod.startswith("sqlalchemy.dialects"):
+        match = re.match(r"sqlalchemy\.dialects\.(\w+)", mod)
+        assert match is not None
+        dname = match.group(1)
+        return "%s.%s" % (dname, outer_type)
+    elif mod.startswith("sqlalchemy"):
+        prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
+        return "%s%s" % (prefix, outer_type)
+    else:
+        return None
+
+
+_constraint_renderers = util.Dispatcher()
+
+
+def _render_constraint(
+    constraint: Constraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: Optional[MetaData],
+) -> Optional[str]:
+    try:
+        renderer = _constraint_renderers.dispatch(constraint)
+    except ValueError:
+        util.warn("No renderer is established for object %r" % constraint)
+        return "[Unknown Python object %r]" % constraint
+    else:
+        return renderer(constraint, autogen_context, namespace_metadata)
+
+
+@_constraint_renderers.dispatch_for(sa_schema.PrimaryKeyConstraint)
+def _render_primary_key(
+    constraint: PrimaryKeyConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: Optional[MetaData],
+) -> Optional[str]:
+    rendered = _user_defined_render("primary_key", constraint, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    if not constraint.columns:
+        return None
+
+    opts = []
+    if constraint.name:
+        opts.append(
+            ("name", repr(_render_gen_name(autogen_context, constraint.name)))
+        )
+    return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "args": ", ".join(
+            [repr(c.name) for c in constraint.columns]
+            + ["%s=%s" % (kwname, val) for kwname, val in opts]
+        ),
+    }
+
+
+def _fk_colspec(
+    fk: ForeignKey,
+    metadata_schema: Optional[str],
+    namespace_metadata: MetaData,
+) -> str:
+    """Implement a 'safe' version of ForeignKey._get_colspec() that
+    won't fail if the remote table can't be resolved.
+
+    """
+    colspec = fk._get_colspec()
+    tokens = colspec.split(".")
+    tname, colname = tokens[-2:]
+
+    if metadata_schema is not None and len(tokens) == 2:
+        table_fullname = "%s.%s" % (metadata_schema, tname)
+    else:
+        table_fullname = ".".join(tokens[0:-1])
+
+    if (
+        not fk.link_to_name
+        and fk.parent is not None
+        and fk.parent.table is not None
+    ):
+        # try to resolve the remote table in order to adjust for column.key.
+        # the FK constraint needs to be rendered in terms of the column
+        # name.
+
+        if table_fullname in namespace_metadata.tables:
+            col = namespace_metadata.tables[table_fullname].c.get(colname)
+            if col is not None:
+                colname = _ident(col.name)  # type: ignore[assignment]
+
+    colspec = "%s.%s" % (table_fullname, colname)
+
+    return colspec
+
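+# e.g. (informal): for ForeignKey("user.id") with a metadata schema of
+# "app", the rendered colspec becomes "app.user.id"; a schema-qualified
+# spec such as "other.user.id" is passed through with its own schema.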
+
+def _populate_render_fk_opts(
+    constraint: ForeignKeyConstraint, opts: List[Tuple[str, str]]
+) -> None:
+    if constraint.onupdate:
+        opts.append(("onupdate", repr(constraint.onupdate)))
+    if constraint.ondelete:
+        opts.append(("ondelete", repr(constraint.ondelete)))
+    if constraint.initially:
+        opts.append(("initially", repr(constraint.initially)))
+    if constraint.deferrable:
+        opts.append(("deferrable", repr(constraint.deferrable)))
+    if constraint.use_alter:
+        opts.append(("use_alter", repr(constraint.use_alter)))
+    if constraint.match:
+        opts.append(("match", repr(constraint.match)))
+
+
+@_constraint_renderers.dispatch_for(sa_schema.ForeignKeyConstraint)
+def _render_foreign_key(
+    constraint: ForeignKeyConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: MetaData,
+) -> Optional[str]:
+    rendered = _user_defined_render("foreign_key", constraint, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    opts = []
+    if constraint.name:
+        opts.append(
+            ("name", repr(_render_gen_name(autogen_context, constraint.name)))
+        )
+
+    _populate_render_fk_opts(constraint, opts)
+
+    apply_metadata_schema = namespace_metadata.schema
+    return (
+        "%(prefix)sForeignKeyConstraint([%(cols)s], "
+        "[%(refcols)s], %(args)s)"
+        % {
+            "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+            "cols": ", ".join(
+                repr(_ident(f.parent.name)) for f in constraint.elements
+            ),
+            "refcols": ", ".join(
+                repr(_fk_colspec(f, apply_metadata_schema, namespace_metadata))
+                for f in constraint.elements
+            ),
+            "args": ", ".join(
+                ["%s=%s" % (kwname, val) for kwname, val in opts]
+            ),
+        }
+    )
+
+
+@_constraint_renderers.dispatch_for(sa_schema.UniqueConstraint)
+def _render_unique_constraint(
+    constraint: UniqueConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: Optional[MetaData],
+) -> str:
+    rendered = _user_defined_render("unique", constraint, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    return _uq_constraint(constraint, autogen_context, False)
+
+
+@_constraint_renderers.dispatch_for(sa_schema.CheckConstraint)
+def _render_check_constraint(
+    constraint: CheckConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: Optional[MetaData],
+) -> Optional[str]:
+    rendered = _user_defined_render("check", constraint, autogen_context)
+    if rendered is not False:
+        return rendered
+
+    # detect the constraint being part of
+    # a parent type which is probably in the Table already.
+    # ideally SQLAlchemy would give us more of a first class
+    # way to detect this.
+    if (
+        constraint._create_rule
+        and hasattr(constraint._create_rule, "target")
+        and isinstance(
+            constraint._create_rule.target,
+            sqltypes.TypeEngine,
+        )
+    ):
+        return None
+    opts = []
+    if constraint.name:
+        opts.append(
+            ("name", repr(_render_gen_name(autogen_context, constraint.name)))
+        )
+    return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % {
+        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
+        "opts": ", " + (", ".join("%s=%s" % (k, v) for k, v in opts))
+        if opts
+        else "",
+        "sqltext": _render_potential_expr(
+            constraint.sqltext, autogen_context, wrap_in_text=False
+        ),
+    }
+
+
+@renderers.dispatch_for(ops.ExecuteSQLOp)
+def _execute_sql(autogen_context: AutogenContext, op: ops.ExecuteSQLOp) -> str:
+    if not isinstance(op.sqltext, str):
+        raise NotImplementedError(
+            "Autogenerate rendering of SQL Expression language constructs "
+            "not supported here; please use a plain SQL string"
+        )
+    return "op.execute(%r)" % op.sqltext
+
+
+renderers = default_renderers.branch()
diff --git a/venv/Lib/site-packages/alembic/autogenerate/rewriter.py b/venv/Lib/site-packages/alembic/autogenerate/rewriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..8994dcf823cb3e40ddbab287cd7b80b166ae885b
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/autogenerate/rewriter.py
@@ -0,0 +1,240 @@
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import Iterator
+from typing import List
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import Union
+
+from .. import util
+from ..operations import ops
+
+if TYPE_CHECKING:
+    from ..operations.ops import AddColumnOp
+    from ..operations.ops import AlterColumnOp
+    from ..operations.ops import CreateTableOp
+    from ..operations.ops import DowngradeOps
+    from ..operations.ops import MigrateOperation
+    from ..operations.ops import MigrationScript
+    from ..operations.ops import ModifyTableOps
+    from ..operations.ops import OpContainer
+    from ..operations.ops import UpgradeOps
+    from ..runtime.migration import MigrationContext
+    from ..script.revision import _GetRevArg
+
+ProcessRevisionDirectiveFn = Callable[
+    ["MigrationContext", "_GetRevArg", List["MigrationScript"]], None
+]
+
+
+class Rewriter:
+    """A helper object that allows easy 'rewriting' of ops streams.
+
+    The :class:`.Rewriter` object is intended to be passed along
+    to the
+    :paramref:`.EnvironmentContext.configure.process_revision_directives`
+    parameter in an ``env.py`` script.  Once constructed, any number
+    of "rewrites" functions can be registered with it; each is given the
+    opportunity to modify an individual directive without needing
+    explicit knowledge of the overall structure.
+
+    Each function is passed the :class:`.MigrationContext` object and
+    ``revision`` tuple that are normally passed to the
+    :paramref:`.EnvironmentContext.configure.process_revision_directives`
+    function, and the third argument is an individual directive of the
+    type noted in the decorator.  The function may return the directive
+    that was actually passed, a new directive to replace it, or a list
+    of zero or more directives to replace it.
+
+    .. seealso::
+
+        :ref:`autogen_rewriter` - usage example
+
+    """
+
+    _traverse = util.Dispatcher()
+
+    _chained: Tuple[Union[ProcessRevisionDirectiveFn, Rewriter], ...] = ()
+
+    def __init__(self) -> None:
+        self.dispatch = util.Dispatcher()
+
+    def chain(
+        self,
+        other: Union[
+            ProcessRevisionDirectiveFn,
+            Rewriter,
+        ],
+    ) -> Rewriter:
+        """Produce a "chain" of this :class:`.Rewriter` to another.
+
+        This allows two or more rewriters to operate serially on a stream,
+        e.g.::
+
+            writer1 = autogenerate.Rewriter()
+            writer2 = autogenerate.Rewriter()
+
+
+            @writer1.rewrites(ops.AddColumnOp)
+            def add_column_nullable(context, revision, op):
+                op.column.nullable = True
+                return op
+
+
+            @writer2.rewrites(ops.AddColumnOp)
+            def add_column_idx(context, revision, op):
+                idx_op = ops.CreateIndexOp(
+                    "ixc", op.table_name, [op.column.name]
+                )
+                return [op, idx_op]
+
+            writer = writer1.chain(writer2)
+
+        :param other: a :class:`.Rewriter` instance
+        :return: a new :class:`.Rewriter` that will run the operations
+         of this writer, then the "other" writer, in succession.
+
+        """
+        wr = self.__class__.__new__(self.__class__)
+        wr.__dict__.update(self.__dict__)
+        wr._chained += (other,)
+        return wr
+
+    def rewrites(
+        self,
+        operator: Union[
+            Type[AddColumnOp],
+            Type[MigrateOperation],
+            Type[AlterColumnOp],
+            Type[CreateTableOp],
+            Type[ModifyTableOps],
+        ],
+    ) -> Callable[..., Any]:
+        """Register a function as rewriter for a given type.
+
+        The function should receive three arguments, which are
+        the :class:`.MigrationContext`, a ``revision`` tuple, and
+        an op directive of the type indicated.  E.g.::
+
+            @writer1.rewrites(ops.AddColumnOp)
+            def add_column_nullable(context, revision, op):
+                op.column.nullable = True
+                return op
+
+        """
+        return self.dispatch.dispatch_for(operator)
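+
+    # Illustrative sketch, not upstream code: per the docstring above, a
+    # registered function may also return an empty list to drop a
+    # directive entirely, e.g.
+    #
+    #     @writer.rewrites(ops.AddColumnOp)
+    #     def drop_audit_columns(context, revision, op):
+    #         # hypothetical filter suppressing a column by name
+    #         return [] if op.column.name == "audit_note" else op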
+
+    def _rewrite(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: MigrateOperation,
+    ) -> Iterator[MigrateOperation]:
+        try:
+            _rewriter = self.dispatch.dispatch(directive)
+        except ValueError:
+            _rewriter = None
+            yield directive
+        else:
+            if self in directive._mutations:
+                yield directive
+            else:
+                for r_directive in util.to_list(
+                    _rewriter(context, revision, directive), []
+                ):
+                    r_directive._mutations = r_directive._mutations.union(
+                        [self]
+                    )
+                    yield r_directive
+
+    def __call__(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directives: List[MigrationScript],
+    ) -> None:
+        self.process_revision_directives(context, revision, directives)
+        for process_revision_directives in self._chained:
+            process_revision_directives(context, revision, directives)
+
+    @_traverse.dispatch_for(ops.MigrationScript)
+    def _traverse_script(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: MigrationScript,
+    ) -> None:
+        upgrade_ops_list: List[UpgradeOps] = []
+        for upgrade_ops in directive.upgrade_ops_list:
+            ret = self._traverse_for(context, revision, upgrade_ops)
+            if len(ret) != 1:
+                raise ValueError(
+                    "Can only return single object for UpgradeOps traverse"
+                )
+            upgrade_ops_list.append(ret[0])
+
+        directive.upgrade_ops = upgrade_ops_list  # type: ignore
+
+        downgrade_ops_list: List[DowngradeOps] = []
+        for downgrade_ops in directive.downgrade_ops_list:
+            ret = self._traverse_for(context, revision, downgrade_ops)
+            if len(ret) != 1:
+                raise ValueError(
+                    "Can only return single object for DowngradeOps traverse"
+                )
+            downgrade_ops_list.append(ret[0])
+        directive.downgrade_ops = downgrade_ops_list  # type: ignore
+
+    @_traverse.dispatch_for(ops.OpContainer)
+    def _traverse_op_container(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: OpContainer,
+    ) -> None:
+        self._traverse_list(context, revision, directive.ops)
+
+    @_traverse.dispatch_for(ops.MigrateOperation)
+    def _traverse_any_directive(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: MigrateOperation,
+    ) -> None:
+        pass
+
+    def _traverse_for(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directive: MigrateOperation,
+    ) -> Any:
+        directives = list(self._rewrite(context, revision, directive))
+        for directive in directives:
+            traverser = self._traverse.dispatch(directive)
+            traverser(self, context, revision, directive)
+        return directives
+
+    def _traverse_list(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directives: Any,
+    ) -> None:
+        dest = []
+        for directive in directives:
+            dest.extend(self._traverse_for(context, revision, directive))
+
+        directives[:] = dest
+
+    def process_revision_directives(
+        self,
+        context: MigrationContext,
+        revision: _GetRevArg,
+        directives: List[MigrationScript],
+    ) -> None:
+        self._traverse_list(context, revision, directives)
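+
+# Illustrative sketch, not upstream code: in an ``env.py`` script the
+# Rewriter instance is passed to EnvironmentContext.configure, e.g.
+#
+#     writer = Rewriter()
+#     context.configure(
+#         connection=connection,  # assumed set up earlier in env.py
+#         target_metadata=target_metadata,  # assumed project metadata
+#         process_revision_directives=writer,
+#     )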
diff --git a/venv/Lib/site-packages/alembic/command.py b/venv/Lib/site-packages/alembic/command.py
new file mode 100644
index 0000000000000000000000000000000000000000..37aa6e67ebf1c82416c96fd40f1dee0ae8c82dcb
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/command.py
@@ -0,0 +1,749 @@
+# mypy: allow-untyped-defs, allow-untyped-calls
+
+from __future__ import annotations
+
+import os
+from typing import List
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from . import autogenerate as autogen
+from . import util
+from .runtime.environment import EnvironmentContext
+from .script import ScriptDirectory
+
+if TYPE_CHECKING:
+    from alembic.config import Config
+    from alembic.script.base import Script
+    from alembic.script.revision import _RevIdType
+    from .runtime.environment import ProcessRevisionDirectiveFn
+
+
+def list_templates(config: Config) -> None:
+    """List available templates.
+
+    :param config: a :class:`.Config` object.
+
+    """
+
+    config.print_stdout("Available templates:\n")
+    for tempname in os.listdir(config.get_template_directory()):
+        with open(
+            os.path.join(config.get_template_directory(), tempname, "README")
+        ) as readme:
+            synopsis = next(readme).rstrip()
+        config.print_stdout("%s - %s", tempname, synopsis)
+
+    config.print_stdout("\nTemplates are used via the 'init' command, e.g.:")
+    config.print_stdout("\n  alembic init --template generic ./scripts")
+
+
+def init(
+    config: Config,
+    directory: str,
+    template: str = "generic",
+    package: bool = False,
+) -> None:
+    """Initialize a new scripts directory.
+
+    :param config: a :class:`.Config` object.
+
+    :param directory: string path of the target directory
+
+    :param template: string name of the migration environment template to
+     use.
+
+    :param package: when True, write ``__init__.py`` files into the
+     environment location as well as the versions/ location.
+
+    """
+
+    if os.access(directory, os.F_OK) and os.listdir(directory):
+        raise util.CommandError(
+            "Directory %s already exists and is not empty" % directory
+        )
+
+    template_dir = os.path.join(config.get_template_directory(), template)
+    if not os.access(template_dir, os.F_OK):
+        raise util.CommandError("No such template %r" % template)
+
+    if not os.access(directory, os.F_OK):
+        with util.status(
+            f"Creating directory {os.path.abspath(directory)!r}",
+            **config.messaging_opts,
+        ):
+            os.makedirs(directory)
+
+    versions = os.path.join(directory, "versions")
+    with util.status(
+        f"Creating directory {os.path.abspath(versions)!r}",
+        **config.messaging_opts,
+    ):
+        os.makedirs(versions)
+
+    script = ScriptDirectory(directory)
+
+    config_file: str | None = None
+    for file_ in os.listdir(template_dir):
+        file_path = os.path.join(template_dir, file_)
+        if file_ == "alembic.ini.mako":
+            assert config.config_file_name is not None
+            config_file = os.path.abspath(config.config_file_name)
+            if os.access(config_file, os.F_OK):
+                util.msg(
+                    f"File {config_file!r} already exists, skipping",
+                    **config.messaging_opts,
+                )
+            else:
+                script._generate_template(
+                    file_path, config_file, script_location=directory
+                )
+        elif os.path.isfile(file_path):
+            output_file = os.path.join(directory, file_)
+            script._copy_file(file_path, output_file)
+
+    if package:
+        for path in [
+            os.path.join(os.path.abspath(directory), "__init__.py"),
+            os.path.join(os.path.abspath(versions), "__init__.py"),
+        ]:
+            with util.status(f"Adding {path!r}", **config.messaging_opts):
+                with open(path, "w"):
+                    pass
+
+    assert config_file is not None
+    util.msg(
+        "Please edit configuration/connection/logging "
+        f"settings in {config_file!r} before proceeding.",
+        **config.messaging_opts,
+    )
+
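+# Illustrative sketch, not upstream code: init() can also be driven
+# programmatically, mirroring ``alembic init migrations``:
+#
+#     from alembic.config import Config
+#
+#     init(Config("alembic.ini"), "migrations")  # hypothetical paths
+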
+
+def revision(
+    config: Config,
+    message: Optional[str] = None,
+    autogenerate: bool = False,
+    sql: bool = False,
+    head: str = "head",
+    splice: bool = False,
+    branch_label: Optional[_RevIdType] = None,
+    version_path: Optional[str] = None,
+    rev_id: Optional[str] = None,
+    depends_on: Optional[str] = None,
+    process_revision_directives: Optional[ProcessRevisionDirectiveFn] = None,
+) -> Union[Optional[Script], List[Optional[Script]]]:
+    """Create a new revision file.
+
+    :param config: a :class:`.Config` object.
+
+    :param message: string message to apply to the revision; this is the
+     ``-m`` option to ``alembic revision``.
+
+    :param autogenerate: whether or not to autogenerate the script from
+     the database; this is the ``--autogenerate`` option to
+     ``alembic revision``.
+
+    :param sql: whether to dump the script out as a SQL string; when specified,
+     the script is dumped to stdout.  This is the ``--sql`` option to
+     ``alembic revision``.
+
+    :param head: head revision to build the new revision upon as a parent;
+     this is the ``--head`` option to ``alembic revision``.
+
+    :param splice: whether or not the new revision should be made into a
+     new head of its own; is required when the given ``head`` is not itself
+     a head.  This is the ``--splice`` option to ``alembic revision``.
+
+    :param branch_label: string label to apply to the branch; this is the
+     ``--branch-label`` option to ``alembic revision``.
+
+    :param version_path: string symbol identifying a specific version path
+     from the configuration; this is the ``--version-path`` option to
+     ``alembic revision``.
+
+    :param rev_id: optional revision identifier to use instead of having
+     one generated; this is the ``--rev-id`` option to ``alembic revision``.
+
+    :param depends_on: optional list of "depends on" identifiers; this is the
+     ``--depends-on`` option to ``alembic revision``.
+
+    :param process_revision_directives: this is a callable that takes the
+     same form as the callable described at
+     :paramref:`.EnvironmentContext.configure.process_revision_directives`;
+     will be applied to the structure generated by the revision process
+     where it can be altered programmatically.  Note that unlike all
+     the other parameters, this option is only available via programmatic
+     use of :func:`.command.revision`.
+
+    """
+
+    script_directory = ScriptDirectory.from_config(config)
+
+    command_args = dict(
+        message=message,
+        autogenerate=autogenerate,
+        sql=sql,
+        head=head,
+        splice=splice,
+        branch_label=branch_label,
+        version_path=version_path,
+        rev_id=rev_id,
+        depends_on=depends_on,
+    )
+    revision_context = autogen.RevisionContext(
+        config,
+        script_directory,
+        command_args,
+        process_revision_directives=process_revision_directives,
+    )
+
+    environment = util.asbool(config.get_main_option("revision_environment"))
+
+    if autogenerate:
+        environment = True
+
+        if sql:
+            raise util.CommandError(
+                "Using --sql with --autogenerate does not make any sense"
+            )
+
+        def retrieve_migrations(rev, context):
+            revision_context.run_autogenerate(rev, context)
+            return []
+
+    elif environment:
+
+        def retrieve_migrations(rev, context):
+            revision_context.run_no_autogenerate(rev, context)
+            return []
+
+    elif sql:
+        raise util.CommandError(
+            "Using --sql with the revision command when "
+            "revision_environment is not configured does not make any sense"
+        )
+
+    if environment:
+        with EnvironmentContext(
+            config,
+            script_directory,
+            fn=retrieve_migrations,
+            as_sql=sql,
+            template_args=revision_context.template_args,
+            revision_context=revision_context,
+        ):
+            script_directory.run_env()
+
+        # the revision_context now has MigrationScript structure(s) present.
+        # these could theoretically be further processed / rewritten *here*,
+        # in addition to the hooks present within each run_migrations() call,
+        # or at the end of env.py run_migrations_online().
+
+    scripts = [script for script in revision_context.generate_scripts()]
+    if len(scripts) == 1:
+        return scripts[0]
+    else:
+        return scripts
+
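+# Illustrative sketch, not upstream code: creating an autogenerate revision
+# programmatically, mirroring ``alembic revision --autogenerate -m ...``:
+#
+#     script = revision(cfg, message="add user table", autogenerate=True)
+#
+# where ``cfg`` is an assumed alembic.config.Config instance.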
+
+def check(config: "Config") -> None:
+    """Check if revision command with autogenerate has pending upgrade ops.
+
+    :param config: a :class:`.Config` object.
+
+    .. versionadded:: 1.9.0
+
+    """
+
+    script_directory = ScriptDirectory.from_config(config)
+
+    command_args = dict(
+        message=None,
+        autogenerate=True,
+        sql=False,
+        head="head",
+        splice=False,
+        branch_label=None,
+        version_path=None,
+        rev_id=None,
+        depends_on=None,
+    )
+    revision_context = autogen.RevisionContext(
+        config,
+        script_directory,
+        command_args,
+    )
+
+    def retrieve_migrations(rev, context):
+        revision_context.run_autogenerate(rev, context)
+        return []
+
+    with EnvironmentContext(
+        config,
+        script_directory,
+        fn=retrieve_migrations,
+        as_sql=False,
+        template_args=revision_context.template_args,
+        revision_context=revision_context,
+    ):
+        script_directory.run_env()
+
+    # the revision_context now has MigrationScript structure(s) present.
+
+    migration_script = revision_context.generated_revisions[-1]
+    diffs = []
+    for upgrade_ops in migration_script.upgrade_ops_list:
+        diffs.extend(upgrade_ops.as_diffs())
+
+    if diffs:
+        raise util.AutogenerateDiffsDetected(
+            f"New upgrade operations detected: {diffs}"
+        )
+    else:
+        config.print_stdout("No new upgrade operations detected.")
+
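+# Illustrative sketch, not upstream code: because check() raises
+# AutogenerateDiffsDetected on model/database drift, it can serve as a
+# CI gate, e.g.
+#
+#     try:
+#         check(cfg)  # cfg: an assumed alembic.config.Config instance
+#     except util.AutogenerateDiffsDetected as err:
+#         raise SystemExit(str(err))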
+
+def merge(
+    config: Config,
+    revisions: _RevIdType,
+    message: Optional[str] = None,
+    branch_label: Optional[_RevIdType] = None,
+    rev_id: Optional[str] = None,
+) -> Optional[Script]:
+    """Merge two revisions together.  Creates a new migration file.
+
+    :param config: a :class:`.Config` instance
+
+    :param message: string message to apply to the revision
+
+    :param branch_label: string label name to apply to the new revision
+
+    :param rev_id: hardcoded revision identifier instead of generating a new
+     one.
+
+    .. seealso::
+
+        :ref:`branches`
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+    template_args = {
+        "config": config  # Let templates use config for
+        # e.g. multiple databases
+    }
+
+    environment = util.asbool(config.get_main_option("revision_environment"))
+
+    if environment:
+
+        def nothing(rev, context):
+            return []
+
+        with EnvironmentContext(
+            config,
+            script,
+            fn=nothing,
+            as_sql=False,
+            template_args=template_args,
+        ):
+            script.run_env()
+
+    return script.generate_revision(
+        rev_id or util.rev_id(),
+        message,
+        refresh=True,
+        head=revisions,
+        branch_labels=branch_label,
+        **template_args,  # type:ignore[arg-type]
+    )
+
+
+def upgrade(
+    config: Config,
+    revision: str,
+    sql: bool = False,
+    tag: Optional[str] = None,
+) -> None:
+    """Upgrade to a later version.
+
+    :param config: a :class:`.Config` instance.
+
+    :param revision: string revision target or range for --sql mode
+
+    :param sql: if True, use ``--sql`` mode
+
+    :param tag: an arbitrary "tag" that can be intercepted by custom
+     ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
+     method.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    starting_rev = None
+    if ":" in revision:
+        if not sql:
+            raise util.CommandError("Range revision not allowed")
+        starting_rev, revision = revision.split(":", 2)
+
+    def upgrade(rev, context):
+        return script._upgrade_revs(revision, rev)
+
+    with EnvironmentContext(
+        config,
+        script,
+        fn=upgrade,
+        as_sql=sql,
+        starting_rev=starting_rev,
+        destination_rev=revision,
+        tag=tag,
+    ):
+        script.run_env()
+
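+# Illustrative sketch, not upstream code: common programmatic calls,
+# mirroring ``alembic upgrade head`` and offline ``--sql`` generation over
+# a ``from:to`` range (revision ids here are hypothetical):
+#
+#     upgrade(cfg, "head")
+#     upgrade(cfg, "ae1027a6acf:head", sql=True)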
+
+def downgrade(
+    config: Config,
+    revision: str,
+    sql: bool = False,
+    tag: Optional[str] = None,
+) -> None:
+    """Revert to a previous version.
+
+    :param config: a :class:`.Config` instance.
+
+    :param revision: string revision target or range for --sql mode
+
+    :param sql: if True, use ``--sql`` mode
+
+    :param tag: an arbitrary "tag" that can be intercepted by custom
+     ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
+     method.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+    starting_rev = None
+    if ":" in revision:
+        if not sql:
+            raise util.CommandError("Range revision not allowed")
+        starting_rev, revision = revision.split(":", 2)
+    elif sql:
+        raise util.CommandError(
+            "downgrade with --sql requires <fromrev>:<torev>"
+        )
+
+    def downgrade(rev, context):
+        return script._downgrade_revs(revision, rev)
+
+    with EnvironmentContext(
+        config,
+        script,
+        fn=downgrade,
+        as_sql=sql,
+        starting_rev=starting_rev,
+        destination_rev=revision,
+        tag=tag,
+    ):
+        script.run_env()
+
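+# Illustrative sketch, not upstream code: stepping back one revision,
+# mirroring ``alembic downgrade -1``:
+#
+#     downgrade(cfg, "-1")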
+
+def show(config, rev):
+    """Show the revision(s) denoted by the given symbol.
+
+    :param config: a :class:`.Config` instance.
+
+    :param rev: string revision target
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    if rev == "current":
+
+        def show_current(rev, context):
+            for sc in script.get_revisions(rev):
+                config.print_stdout(sc.log_entry)
+            return []
+
+        with EnvironmentContext(config, script, fn=show_current):
+            script.run_env()
+    else:
+        for sc in script.get_revisions(rev):
+            config.print_stdout(sc.log_entry)
+
+
+def history(
+    config: Config,
+    rev_range: Optional[str] = None,
+    verbose: bool = False,
+    indicate_current: bool = False,
+) -> None:
+    """List changeset scripts in chronological order.
+
+    :param config: a :class:`.Config` instance.
+
+    :param rev_range: string revision range
+
+    :param verbose: output in verbose mode.
+
+    :param indicate_current: indicate current revision.
+
+    """
+    base: Optional[str]
+    head: Optional[str]
+    script = ScriptDirectory.from_config(config)
+    if rev_range is not None:
+        if ":" not in rev_range:
+            raise util.CommandError(
+                "History range requires [start]:[end], " "[start]:, or :[end]"
+            )
+        base, head = rev_range.strip().split(":")
+    else:
+        base = head = None
+
+    environment = (
+        util.asbool(config.get_main_option("revision_environment"))
+        or indicate_current
+    )
+
+    def _display_history(config, script, base, head, currents=()):
+        for sc in script.walk_revisions(
+            base=base or "base", head=head or "heads"
+        ):
+            if indicate_current:
+                sc._db_current_indicator = sc.revision in currents
+
+            config.print_stdout(
+                sc.cmd_format(
+                    verbose=verbose,
+                    include_branches=True,
+                    include_doc=True,
+                    include_parents=True,
+                )
+            )
+
+    def _display_history_w_current(config, script, base, head):
+        def _display_current_history(rev, context):
+            if head == "current":
+                _display_history(config, script, base, rev, rev)
+            elif base == "current":
+                _display_history(config, script, rev, head, rev)
+            else:
+                _display_history(config, script, base, head, rev)
+            return []
+
+        with EnvironmentContext(config, script, fn=_display_current_history):
+            script.run_env()
+
+    if base == "current" or head == "current" or environment:
+        _display_history_w_current(config, script, base, head)
+    else:
+        _display_history(config, script, base, head)
+
+
+def heads(config, verbose=False, resolve_dependencies=False):
+    """Show current available heads in the script directory.
+
+    :param config: a :class:`.Config` instance.
+
+    :param verbose: output in verbose mode.
+
+    :param resolve_dependencies: treat dependency versions as down revisions.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+    if resolve_dependencies:
+        heads = script.get_revisions("heads")
+    else:
+        heads = script.get_revisions(script.get_heads())
+
+    for rev in heads:
+        config.print_stdout(
+            rev.cmd_format(
+                verbose, include_branches=True, tree_indicators=False
+            )
+        )
+
+
+def branches(config, verbose=False):
+    """Show current branch points.
+
+    :param config: a :class:`.Config` instance.
+
+    :param verbose: output in verbose mode.
+
+    """
+    script = ScriptDirectory.from_config(config)
+    for sc in script.walk_revisions():
+        if sc.is_branch_point:
+            config.print_stdout(
+                "%s\n%s\n",
+                sc.cmd_format(verbose, include_branches=True),
+                "\n".join(
+                    "%s -> %s"
+                    % (
+                        " " * len(str(sc.revision)),
+                        rev_obj.cmd_format(
+                            False, include_branches=True, include_doc=verbose
+                        ),
+                    )
+                    for rev_obj in (
+                        script.get_revision(rev) for rev in sc.nextrev
+                    )
+                ),
+            )
+
+
+def current(config: Config, verbose: bool = False) -> None:
+    """Display the current revision for a database.
+
+    :param config: a :class:`.Config` instance.
+
+    :param verbose: output in verbose mode.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    def display_version(rev, context):
+        if verbose:
+            config.print_stdout(
+                "Current revision(s) for %s:",
+                util.obfuscate_url_pw(context.connection.engine.url),
+            )
+        for rev in script.get_all_current(rev):
+            config.print_stdout(rev.cmd_format(verbose))
+
+        return []
+
+    with EnvironmentContext(
+        config, script, fn=display_version, dont_mutate=True
+    ):
+        script.run_env()
+
+
+def stamp(
+    config: Config,
+    revision: _RevIdType,
+    sql: bool = False,
+    tag: Optional[str] = None,
+    purge: bool = False,
+) -> None:
+    """'stamp' the revision table with the given revision; don't
+    run any migrations.
+
+    :param config: a :class:`.Config` instance.
+
+    :param revision: target revision or list of revisions.  May be a list
+     to indicate stamping of multiple branch heads.
+
+     .. note:: this parameter is called "revisions" in the command line
+        interface.
+
+    :param sql: use ``--sql`` mode
+
+    :param tag: an arbitrary "tag" that can be intercepted by custom
+     ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
+     method.
+
+    :param purge: delete all entries in the version table before stamping.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    if sql:
+        destination_revs = []
+        starting_rev = None
+        for _revision in util.to_list(revision):
+            if ":" in _revision:
+                srev, _revision = _revision.split(":", 2)
+
+                if starting_rev != srev:
+                    if starting_rev is None:
+                        starting_rev = srev
+                    else:
+                        raise util.CommandError(
+                            "Stamp operation with --sql only supports a "
+                            "single starting revision at a time"
+                        )
+            destination_revs.append(_revision)
+    else:
+        destination_revs = util.to_list(revision)
+
+    def do_stamp(rev, context):
+        return script._stamp_revs(util.to_tuple(destination_revs), rev)
+
+    with EnvironmentContext(
+        config,
+        script,
+        fn=do_stamp,
+        as_sql=sql,
+        starting_rev=starting_rev if sql else None,
+        destination_rev=util.to_tuple(destination_revs),
+        tag=tag,
+        purge=purge,
+    ):
+        script.run_env()
+
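+# Illustrative sketch, not upstream code: marking a database as being at a
+# revision without running migrations, mirroring ``alembic stamp head``:
+#
+#     stamp(cfg, "head")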
+
+def edit(config: Config, rev: str) -> None:
+    """Edit revision script(s) using $EDITOR.
+
+    :param config: a :class:`.Config` instance.
+
+    :param rev: target revision.
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    if rev == "current":
+
+        def edit_current(rev, context):
+            if not rev:
+                raise util.CommandError("No current revisions")
+            for sc in script.get_revisions(rev):
+                util.open_in_editor(sc.path)
+            return []
+
+        with EnvironmentContext(config, script, fn=edit_current):
+            script.run_env()
+    else:
+        revs = script.get_revisions(rev)
+        if not revs:
+            raise util.CommandError(
+                "No revision files indicated by symbol '%s'" % rev
+            )
+        for sc in revs:
+            assert sc
+            util.open_in_editor(sc.path)
+
+
+def ensure_version(config: Config, sql: bool = False) -> None:
+    """Create the alembic version table if it doesn't exist already .
+
+    :param config: a :class:`.Config` instance.
+
+    :param sql: use ``--sql`` mode
+
+    .. versionadded:: 1.7.6
+
+    """
+
+    script = ScriptDirectory.from_config(config)
+
+    def do_ensure_version(rev, context):
+        context._ensure_version_table()
+        return []
+
+    with EnvironmentContext(
+        config,
+        script,
+        fn=do_ensure_version,
+        as_sql=sql,
+    ):
+        script.run_env()
diff --git a/venv/Lib/site-packages/alembic/config.py b/venv/Lib/site-packages/alembic/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b2263fddacf0cd85e9969221ef9b3d8105cd8f4
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/config.py
@@ -0,0 +1,645 @@
+from __future__ import annotations
+
+from argparse import ArgumentParser
+from argparse import Namespace
+from configparser import ConfigParser
+import inspect
+import os
+import sys
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import Mapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import TextIO
+from typing import Union
+
+from typing_extensions import TypedDict
+
+from . import __version__
+from . import command
+from . import util
+from .util import compat
+
+
+class Config:
+    r"""Represent an Alembic configuration.
+
+    Within an ``env.py`` script, this is available
+    via the :attr:`.EnvironmentContext.config` attribute,
+    which in turn is available at ``alembic.context``::
+
+        from alembic import context
+
+        some_param = context.config.get_main_option("my option")
+
+    When invoking Alembic programmatically, a new
+    :class:`.Config` can be created by passing
+    the name of an .ini file to the constructor::
+
+        from alembic.config import Config
+        alembic_cfg = Config("/path/to/yourapp/alembic.ini")
+
+    With a :class:`.Config` object, you can then
+    run Alembic commands programmatically using the directives
+    in :mod:`alembic.command`.
+
+    The :class:`.Config` object can also be constructed without
+    a filename.   Values can be set programmatically, and
+    new sections will be created as needed::
+
+        from alembic.config import Config
+        alembic_cfg = Config()
+        alembic_cfg.set_main_option("script_location", "myapp:migrations")
+        alembic_cfg.set_main_option("sqlalchemy.url", "postgresql://foo/bar")
+        alembic_cfg.set_section_option("mysection", "foo", "bar")
+
+    .. warning::
+
+       When using programmatic configuration, make sure the
+       ``env.py`` file in use is compatible with the target configuration;
+       including that the call to Python ``logging.fileConfig()`` is
+       omitted if the programmatic configuration doesn't actually include
+       logging directives.
+
+    For passing non-string values to environments, such as connections and
+    engines, use the :attr:`.Config.attributes` dictionary::
+
+        with engine.begin() as connection:
+            alembic_cfg.attributes['connection'] = connection
+            command.upgrade(alembic_cfg, "head")
+
+    :param file\_: name of the .ini file to open.
+    :param ini_section: name of the main Alembic section within the
+     .ini file
+    :param output_buffer: optional file-like input buffer which
+     will be passed to the :class:`.MigrationContext` - used to redirect
+     the output of "offline generation" when using Alembic programmatically.
+    :param stdout: buffer where the "print" output of commands will be sent.
+     Defaults to ``sys.stdout``.
+
+    :param config_args: A dictionary of keys and values that will be used
+     for substitution in the alembic config file.  The dictionary as given
+     is **copied** to a new one, stored locally as the attribute
+     ``.config_args``. When the :attr:`.Config.file_config` attribute is
+     first invoked, the replacement variable ``here`` will be added to this
+     dictionary before the dictionary is passed to ``ConfigParser()``
+     to parse the .ini file.
+
+    :param attributes: optional dictionary of arbitrary Python keys/values,
+     which will be populated into the :attr:`.Config.attributes` dictionary.
+
+     .. seealso::
+
+        :ref:`connection_sharing`
+
+    """
+
+    def __init__(
+        self,
+        file_: Union[str, os.PathLike[str], None] = None,
+        ini_section: str = "alembic",
+        output_buffer: Optional[TextIO] = None,
+        stdout: TextIO = sys.stdout,
+        cmd_opts: Optional[Namespace] = None,
+        config_args: Mapping[str, Any] = util.immutabledict(),
+        attributes: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Construct a new :class:`.Config`"""
+        self.config_file_name = file_
+        self.config_ini_section = ini_section
+        self.output_buffer = output_buffer
+        self.stdout = stdout
+        self.cmd_opts = cmd_opts
+        self.config_args = dict(config_args)
+        if attributes:
+            self.attributes.update(attributes)
+
+    cmd_opts: Optional[Namespace] = None
+    """The command-line options passed to the ``alembic`` script.
+
+    Within an ``env.py`` script this can be accessed via the
+    :attr:`.EnvironmentContext.config` attribute.
+
+    .. seealso::
+
+        :meth:`.EnvironmentContext.get_x_argument`
+
+    """
+
+    config_file_name: Union[str, os.PathLike[str], None] = None
+    """Filesystem path to the .ini file in use."""
+
+    config_ini_section: str = None  # type:ignore[assignment]
+    """Name of the config file section to read basic configuration
+    from.  Defaults to ``alembic``, that is the ``[alembic]`` section
+    of the .ini file.  This value is modified using the ``-n/--name``
+    option to the Alembic runner.
+
+    """
+
+    @util.memoized_property
+    def attributes(self) -> Dict[str, Any]:
+        """A Python dictionary for storage of additional state.
+
+        This is a utility dictionary which can include not just strings but
+        engines, connections, schema objects, or anything else.
+        Use this to pass objects into an env.py script, such as passing
+        a :class:`sqlalchemy.engine.base.Connection` when calling
+        commands from :mod:`alembic.command` programmatically.
+
+        .. seealso::
+
+            :ref:`connection_sharing`
+
+            :paramref:`.Config.attributes`
+
+        """
+        return {}
+
+    def print_stdout(self, text: str, *arg: Any) -> None:
+        """Render a message to standard out.
+
+        When :meth:`.Config.print_stdout` is called with additional args,
+        those arguments will be formatted against the provided text;
+        otherwise the provided text is output verbatim.
+
+        This is a no-op when the ``quiet`` messaging option is enabled.
+
+        e.g.::
+
+            >>> config.print_stdout('Some text %s', 'arg')
+            Some text arg
+
+        """
+
+        if arg:
+            output = str(text) % arg
+        else:
+            output = str(text)
+
+        util.write_outstream(self.stdout, output, "\n", **self.messaging_opts)
+
+    @util.memoized_property
+    def file_config(self) -> ConfigParser:
+        """Return the underlying ``ConfigParser`` object.
+
+        Direct access to the .ini file is available here,
+        though the :meth:`.Config.get_section` and
+        :meth:`.Config.get_main_option`
+        methods provide a possibly simpler interface.
+
+        """
+
+        if self.config_file_name:
+            here = os.path.abspath(os.path.dirname(self.config_file_name))
+        else:
+            here = ""
+        self.config_args["here"] = here
+        file_config = ConfigParser(self.config_args)
+        if self.config_file_name:
+            compat.read_config_parser(file_config, [self.config_file_name])
+        else:
+            file_config.add_section(self.config_ini_section)
+        return file_config
+
+    def get_template_directory(self) -> str:
+        """Return the directory where Alembic setup templates are found.
+
+        This method is used by the alembic ``init`` and ``list_templates``
+        commands.
+
+        """
+        import alembic
+
+        package_dir = os.path.abspath(os.path.dirname(alembic.__file__))
+        return os.path.join(package_dir, "templates")
+
+    @overload
+    def get_section(
+        self, name: str, default: None = ...
+    ) -> Optional[Dict[str, str]]:
+        ...
+
+    # "default" here could also be a TypeVar
+    # _MT = TypeVar("_MT", bound=Mapping[str, str]),
+    # however mypy wasn't handling that correctly (pyright was)
+    @overload
+    def get_section(
+        self, name: str, default: Dict[str, str]
+    ) -> Dict[str, str]:
+        ...
+
+    @overload
+    def get_section(
+        self, name: str, default: Mapping[str, str]
+    ) -> Union[Dict[str, str], Mapping[str, str]]:
+        ...
+
+    def get_section(
+        self, name: str, default: Optional[Mapping[str, str]] = None
+    ) -> Optional[Mapping[str, str]]:
+        """Return all the configuration options from a given .ini file section
+        as a dictionary.
+
+        If the given section does not exist, the value of ``default``
+        is returned, which is expected to be a dictionary or other mapping.
+
+        """
+        if not self.file_config.has_section(name):
+            return default
+
+        return dict(self.file_config.items(name))
+
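+    # Illustrative sketch, not upstream code: reading an auxiliary section
+    # with a fallback default (section name used here as an example):
+    #
+    #     hooks = config.get_section("post_write_hooks", {})
+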
+    def set_main_option(self, name: str, value: str) -> None:
+        """Set an option programmatically within the 'main' section.
+
+        This overrides whatever was in the .ini file.
+
+        :param name: name of the value
+
+        :param value: the value.  Note that this value is passed to
+         ``ConfigParser.set``, which supports variable interpolation using
+         pyformat (e.g. ``%(some_value)s``).   A raw percent sign not part of
+         an interpolation symbol must therefore be escaped, e.g. ``%%``.
+         The given value may refer to another value already in the file
+         using the interpolation format.
+
+        """
+        self.set_section_option(self.config_ini_section, name, value)
+
+    def remove_main_option(self, name: str) -> None:
+        self.file_config.remove_option(self.config_ini_section, name)
+
+    def set_section_option(self, section: str, name: str, value: str) -> None:
+        """Set an option programmatically within the given section.
+
+        The section is created if it doesn't exist already.
+        The value here will override whatever was in the .ini
+        file.
+
+        :param section: name of the section
+
+        :param name: name of the value
+
+        :param value: the value.  Note that this value is passed to
+         ``ConfigParser.set``, which supports variable interpolation using
+         pyformat (e.g. ``%(some_value)s``).   A raw percent sign not part of
+         an interpolation symbol must therefore be escaped, e.g. ``%%``.
+         The given value may refer to another value already in the file
+         using the interpolation format.
+
+        """
+
+        if not self.file_config.has_section(section):
+            self.file_config.add_section(section)
+        self.file_config.set(section, name, value)
+
+    def get_section_option(
+        self, section: str, name: str, default: Optional[str] = None
+    ) -> Optional[str]:
+        """Return an option from the given section of the .ini file."""
+        if not self.file_config.has_section(section):
+            raise util.CommandError(
+                "No config file %r found, or file has no "
+                "'[%s]' section" % (self.config_file_name, section)
+            )
+        if self.file_config.has_option(section, name):
+            return self.file_config.get(section, name)
+        else:
+            return default
+
+    @overload
+    def get_main_option(self, name: str, default: str) -> str:
+        ...
+
+    @overload
+    def get_main_option(
+        self, name: str, default: Optional[str] = None
+    ) -> Optional[str]:
+        ...
+
+    def get_main_option(
+        self, name: str, default: Optional[str] = None
+    ) -> Optional[str]:
+        """Return an option from the 'main' section of the .ini file.
+
+        This defaults to being a key from the ``[alembic]``
+        section, unless the ``-n/--name`` flag were used to
+        indicate a different section.
+
+        """
+        return self.get_section_option(self.config_ini_section, name, default)
+
+    @util.memoized_property
+    def messaging_opts(self) -> MessagingOptions:
+        """The messaging options."""
+        return cast(
+            MessagingOptions,
+            util.immutabledict(
+                {"quiet": getattr(self.cmd_opts, "quiet", False)}
+            ),
+        )
+
+
+class MessagingOptions(TypedDict, total=False):
+    quiet: bool
+
+
+class CommandLine:
+    def __init__(self, prog: Optional[str] = None) -> None:
+        self._generate_args(prog)
+
+    def _generate_args(self, prog: Optional[str]) -> None:
+        def add_options(
+            fn: Any, parser: Any, positional: Any, kwargs: Any
+        ) -> None:
+            kwargs_opts = {
+                "template": (
+                    "-t",
+                    "--template",
+                    dict(
+                        default="generic",
+                        type=str,
+                        help="Setup template for use with 'init'",
+                    ),
+                ),
+                "message": (
+                    "-m",
+                    "--message",
+                    dict(
+                        type=str, help="Message string to use with 'revision'"
+                    ),
+                ),
+                "sql": (
+                    "--sql",
+                    dict(
+                        action="store_true",
+                        help="Don't emit SQL to database - dump to "
+                        "standard output/file instead. See docs on "
+                        "offline mode.",
+                    ),
+                ),
+                "tag": (
+                    "--tag",
+                    dict(
+                        type=str,
+                        help="Arbitrary 'tag' name - can be used by "
+                        "custom env.py scripts.",
+                    ),
+                ),
+                "head": (
+                    "--head",
+                    dict(
+                        type=str,
+                        help="Specify head revision or <branchname>@head "
+                        "to base new revision on.",
+                    ),
+                ),
+                "splice": (
+                    "--splice",
+                    dict(
+                        action="store_true",
+                        help="Allow a non-head revision as the "
+                        "'head' to splice onto",
+                    ),
+                ),
+                "depends_on": (
+                    "--depends-on",
+                    dict(
+                        action="append",
+                        help="Specify one or more revision identifiers "
+                        "which this revision should depend on.",
+                    ),
+                ),
+                "rev_id": (
+                    "--rev-id",
+                    dict(
+                        type=str,
+                        help="Specify a hardcoded revision id instead of "
+                        "generating one",
+                    ),
+                ),
+                "version_path": (
+                    "--version-path",
+                    dict(
+                        type=str,
+                        help="Specify specific path from config for "
+                        "version file",
+                    ),
+                ),
+                "branch_label": (
+                    "--branch-label",
+                    dict(
+                        type=str,
+                        help="Specify a branch label to apply to the "
+                        "new revision",
+                    ),
+                ),
+                "verbose": (
+                    "-v",
+                    "--verbose",
+                    dict(action="store_true", help="Use more verbose output"),
+                ),
+                "resolve_dependencies": (
+                    "--resolve-dependencies",
+                    dict(
+                        action="store_true",
+                        help="Treat dependency versions as down revisions",
+                    ),
+                ),
+                "autogenerate": (
+                    "--autogenerate",
+                    dict(
+                        action="store_true",
+                        help="Populate revision script with candidate "
+                        "migration operations, based on comparison "
+                        "of database to model.",
+                    ),
+                ),
+                "rev_range": (
+                    "-r",
+                    "--rev-range",
+                    dict(
+                        action="store",
+                        help="Specify a revision range; "
+                        "format is [start]:[end]",
+                    ),
+                ),
+                "indicate_current": (
+                    "-i",
+                    "--indicate-current",
+                    dict(
+                        action="store_true",
+                        help="Indicate the current revision",
+                    ),
+                ),
+                "purge": (
+                    "--purge",
+                    dict(
+                        action="store_true",
+                        help="Unconditionally erase the version table "
+                        "before stamping",
+                    ),
+                ),
+                "package": (
+                    "--package",
+                    dict(
+                        action="store_true",
+                        help="Write empty __init__.py files to the "
+                        "environment and version locations",
+                    ),
+                ),
+            }
+            positional_help = {
+                "directory": "location of scripts directory",
+                "revision": "revision identifier",
+                "revisions": "one or more revisions, or 'heads' for all heads",
+            }
+            for arg in kwargs:
+                if arg in kwargs_opts:
+                    args = kwargs_opts[arg]
+                    args, kw = args[0:-1], args[-1]
+                    parser.add_argument(*args, **kw)
+
+            for arg in positional:
+                if (
+                    arg == "revisions"
+                    or fn in positional_translations
+                    and positional_translations[fn][arg] == "revisions"
+                ):
+                    subparser.add_argument(
+                        "revisions",
+                        nargs="+",
+                        help=positional_help.get("revisions"),
+                    )
+                else:
+                    subparser.add_argument(arg, help=positional_help.get(arg))
+
+        parser = ArgumentParser(prog=prog)
+
+        parser.add_argument(
+            "--version", action="version", version="%%(prog)s %s" % __version__
+        )
+        parser.add_argument(
+            "-c",
+            "--config",
+            type=str,
+            default=os.environ.get("ALEMBIC_CONFIG", "alembic.ini"),
+            help="Alternate config file; defaults to value of "
+            'ALEMBIC_CONFIG environment variable, or "alembic.ini"',
+        )
+        parser.add_argument(
+            "-n",
+            "--name",
+            type=str,
+            default="alembic",
+            help="Name of section in .ini file to " "use for Alembic config",
+        )
+        parser.add_argument(
+            "-x",
+            action="append",
+            help="Additional arguments consumed by "
+            "custom env.py scripts, e.g. -x "
+            "setting1=somesetting -x setting2=somesetting",
+        )
+        parser.add_argument(
+            "--raiseerr",
+            action="store_true",
+            help="Raise a full stack trace on error",
+        )
+        parser.add_argument(
+            "-q",
+            "--quiet",
+            action="store_true",
+            help="Do not log to std output.",
+        )
+        subparsers = parser.add_subparsers()
+
+        positional_translations: Dict[Any, Any] = {
+            command.stamp: {"revision": "revisions"}
+        }
+
+        for fn in [getattr(command, n) for n in dir(command)]:
+            if (
+                inspect.isfunction(fn)
+                and fn.__name__[0] != "_"
+                and fn.__module__ == "alembic.command"
+            ):
+                spec = compat.inspect_getfullargspec(fn)
+                if spec[3] is not None:
+                    positional = spec[0][1 : -len(spec[3])]
+                    kwarg = spec[0][-len(spec[3]) :]
+                else:
+                    positional = spec[0][1:]
+                    kwarg = []
+
+                if fn in positional_translations:
+                    positional = [
+                        positional_translations[fn].get(name, name)
+                        for name in positional
+                    ]
+
+                # parse first line(s) of helptext without a line break
+                help_ = fn.__doc__
+                if help_:
+                    help_text = []
+                    for line in help_.split("\n"):
+                        if not line.strip():
+                            break
+                        else:
+                            help_text.append(line.strip())
+                else:
+                    help_text = []
+                subparser = subparsers.add_parser(
+                    fn.__name__, help=" ".join(help_text)
+                )
+                add_options(fn, subparser, positional, kwarg)
+                subparser.set_defaults(cmd=(fn, positional, kwarg))
+        self.parser = parser
+
+    def run_cmd(self, config: Config, options: Namespace) -> None:
+        fn, positional, kwarg = options.cmd
+
+        try:
+            fn(
+                config,
+                *[getattr(options, k, None) for k in positional],
+                **{k: getattr(options, k, None) for k in kwarg},
+            )
+        except util.CommandError as e:
+            if options.raiseerr:
+                raise
+            else:
+                util.err(str(e), **config.messaging_opts)
+
+    def main(self, argv: Optional[Sequence[str]] = None) -> None:
+        options = self.parser.parse_args(argv)
+        if not hasattr(options, "cmd"):
+            # see http://bugs.python.org/issue9253, argparse
+            # behavior changed incompatibly in py3.3
+            self.parser.error("too few arguments")
+        else:
+            cfg = Config(
+                file_=options.config,
+                ini_section=options.name,
+                cmd_opts=options,
+            )
+            self.run_cmd(cfg, options)
+
+
+def main(
+    argv: Optional[Sequence[str]] = None,
+    prog: Optional[str] = None,
+    **kwargs: Any,
+) -> None:
+    """The console runner function for Alembic."""
+
+    CommandLine(prog=prog).main(argv=argv)
+
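+# Illustrative sketch, not upstream code: main() doubles as the console
+# entry point, so the CLI can be embedded in another tool:
+#
+#     from alembic.config import main
+#
+#     main(argv=["upgrade", "head"])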
+
+if __name__ == "__main__":
+    main()
diff --git a/venv/Lib/site-packages/alembic/context.py b/venv/Lib/site-packages/alembic/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..758fca8756c8bac18ea91888b6de484a11618018
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/context.py
@@ -0,0 +1,5 @@
+from .runtime.environment import EnvironmentContext
+
+# create proxy functions for
+# each method on the EnvironmentContext class.
+EnvironmentContext.create_module_class_proxy(globals(), locals())
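+
+# Illustrative sketch, not upstream code: within env.py these proxied
+# functions are used directly from the module, e.g.
+#
+#     from alembic import context
+#
+#     if context.is_offline_mode():
+#         run_migrations_offline()  # hypothetical helpers defined in env.py
+#     else:
+#         run_migrations_online()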
diff --git a/venv/Lib/site-packages/alembic/context.pyi b/venv/Lib/site-packages/alembic/context.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..80619fb24f13fadcbaa7fcd4a907b19f231b12e3
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/context.pyi
@@ -0,0 +1,853 @@
+# ### the stubs in this file are generated by tools/write_pyi.py - do not edit ###
+# ### imports are manually managed
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import Collection
+from typing import ContextManager
+from typing import Dict
+from typing import Iterable
+from typing import List
+from typing import Literal
+from typing import Mapping
+from typing import MutableMapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import TextIO
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine.base import Connection
+    from sqlalchemy.engine.url import URL
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import FetchedValue
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .autogenerate.api import AutogenContext
+    from .config import Config
+    from .operations.ops import MigrationScript
+    from .runtime.migration import _ProxyTransaction
+    from .runtime.migration import MigrationContext
+    from .runtime.migration import MigrationInfo
+    from .script import ScriptDirectory
+
+### end imports ###
+
+def begin_transaction() -> Union[_ProxyTransaction, ContextManager[None]]:
+    """Return a context manager that will
+    enclose an operation within a "transaction",
+    as defined by the environment's offline
+    and transactional DDL settings.
+
+    e.g.::
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+    :meth:`.begin_transaction` is intended to
+    "do the right thing" regardless of
+    calling context:
+
+    * If :meth:`.is_transactional_ddl` is ``False``,
+      returns a "do nothing" context manager
+      which otherwise produces no transactional
+      state or directives.
+    * If :meth:`.is_offline_mode` is ``True``,
+      returns a context manager that will
+      invoke the :meth:`.DefaultImpl.emit_begin`
+      and :meth:`.DefaultImpl.emit_commit`
+      methods, which will produce the string
+      directives ``BEGIN`` and ``COMMIT`` on
+      the output stream, as rendered by the
+      target backend (e.g. SQL Server would
+      emit ``BEGIN TRANSACTION``).
+    * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
+      on the current online connection, which
+      returns a :class:`sqlalchemy.engine.Transaction`
+      object.  This object demarcates a real
+      transaction and is itself a context manager,
+      which will roll back if an exception
+      is raised.
+
+    Note that a custom ``env.py`` script which
+    has more specific transactional needs can of course
+    manipulate the :class:`~sqlalchemy.engine.Connection`
+    directly to produce transactional state in "online"
+    mode.
+
+    """
+
+config: Config
+
+def configure(
+    connection: Optional[Connection] = None,
+    url: Union[str, URL, None] = None,
+    dialect_name: Optional[str] = None,
+    dialect_opts: Optional[Dict[str, Any]] = None,
+    transactional_ddl: Optional[bool] = None,
+    transaction_per_migration: bool = False,
+    output_buffer: Optional[TextIO] = None,
+    starting_rev: Optional[str] = None,
+    tag: Optional[str] = None,
+    template_args: Optional[Dict[str, Any]] = None,
+    render_as_batch: bool = False,
+    target_metadata: Union[MetaData, Sequence[MetaData], None] = None,
+    include_name: Optional[
+        Callable[
+            [
+                Optional[str],
+                Literal[
+                    "schema",
+                    "table",
+                    "column",
+                    "index",
+                    "unique_constraint",
+                    "foreign_key_constraint",
+                ],
+                MutableMapping[
+                    Literal[
+                        "schema_name",
+                        "table_name",
+                        "schema_qualified_table_name",
+                    ],
+                    Optional[str],
+                ],
+            ],
+            bool,
+        ]
+    ] = None,
+    include_object: Optional[
+        Callable[
+            [
+                SchemaItem,
+                Optional[str],
+                Literal[
+                    "schema",
+                    "table",
+                    "column",
+                    "index",
+                    "unique_constraint",
+                    "foreign_key_constraint",
+                ],
+                bool,
+                Optional[SchemaItem],
+            ],
+            bool,
+        ]
+    ] = None,
+    include_schemas: bool = False,
+    process_revision_directives: Optional[
+        Callable[
+            [
+                MigrationContext,
+                Union[str, Iterable[Optional[str]], Iterable[str]],
+                List[MigrationScript],
+            ],
+            None,
+        ]
+    ] = None,
+    compare_type: Union[
+        bool,
+        Callable[
+            [
+                MigrationContext,
+                Column[Any],
+                Column[Any],
+                TypeEngine[Any],
+                TypeEngine[Any],
+            ],
+            Optional[bool],
+        ],
+    ] = True,
+    compare_server_default: Union[
+        bool,
+        Callable[
+            [
+                MigrationContext,
+                Column[Any],
+                Column[Any],
+                Optional[str],
+                Optional[FetchedValue],
+                Optional[str],
+            ],
+            Optional[bool],
+        ],
+    ] = False,
+    render_item: Optional[
+        Callable[[str, Any, AutogenContext], Union[str, Literal[False]]]
+    ] = None,
+    literal_binds: bool = False,
+    upgrade_token: str = "upgrades",
+    downgrade_token: str = "downgrades",
+    alembic_module_prefix: str = "op.",
+    sqlalchemy_module_prefix: str = "sa.",
+    user_module_prefix: Optional[str] = None,
+    on_version_apply: Optional[
+        Callable[
+            [
+                MigrationContext,
+                MigrationInfo,
+                Collection[Any],
+                Mapping[str, Any],
+            ],
+            None,
+        ]
+    ] = None,
+    **kw: Any,
+) -> None:
+    """Configure a :class:`.MigrationContext` within this
+    :class:`.EnvironmentContext` which will provide database
+    connectivity and other configuration to a series of
+    migration scripts.
+
+    Many methods on :class:`.EnvironmentContext` require that
+    this method has been called in order to function, as they
+    ultimately need to have database access or at least access
+    to the dialect in use.  Those which do are documented as such.
+
+    The important thing needed by :meth:`.configure` is a
+    means to determine what kind of database dialect is in use.
+    An actual connection to that database is needed only if
+    the :class:`.MigrationContext` is to be used in
+    "online" mode.
+
+    If the :meth:`.is_offline_mode` function returns ``True``,
+    then no connection is needed here.  Otherwise, the
+    ``connection`` parameter should be present as an
+    instance of :class:`sqlalchemy.engine.Connection`.
+
+    This function is typically called from the ``env.py``
+    script within a migration environment.  It can be called
+    multiple times within a single invocation.  The most recent
+    :class:`~sqlalchemy.engine.Connection`
+    for which it was called is the one that will be operated upon
+    by the next call to :meth:`.run_migrations`.
+
+    General parameters:
+
+    :param connection: a :class:`~sqlalchemy.engine.Connection`
+     to use
+     for SQL execution in "online" mode.  When present, it is also
+     used to determine the type of dialect in use.
+    :param url: a string database url, or a
+     :class:`sqlalchemy.engine.url.URL` object.
+     The type of dialect to be used will be derived from this if
+     ``connection`` is not passed.
+    :param dialect_name: string name of a dialect, such as
+     "postgresql", "mssql", etc.
+     The type of dialect to be used will be derived from this if
+     ``connection`` and ``url`` are not passed.
+    :param dialect_opts: dictionary of options to be passed to dialect
+     constructor.
+    :param transactional_ddl: Force the usage of "transactional"
+     DDL on or off;
+     this otherwise defaults to whether or not the dialect in
+     use supports it.
+    :param transaction_per_migration: if True, nest each migration script
+     in its own transaction, rather than running the full series of
+     migrations within a single transaction.
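+
+     E.g., a minimal sketch::
+
+        context.configure(
+            # ...
+            transaction_per_migration=True
+        )
+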
+    :param output_buffer: a file-like object that will be used
+     for textual output
+     when the ``--sql`` option is used to generate SQL scripts.
+     Defaults to
+     ``sys.stdout`` if not passed here and also not present on
+     the :class:`.Config`
+     object.  The value here overrides that of the :class:`.Config`
+     object.
+    :param output_encoding: when using ``--sql`` to generate SQL
+     scripts, apply this encoding to the string output.
+    :param literal_binds: when using ``--sql`` to generate SQL
+     scripts, pass through the ``literal_binds`` flag to the compiler
+     so that any literal values that would ordinarily be bound
+     parameters are converted to plain strings.
+
+     .. warning:: Dialects can typically only handle simple datatypes
+        like strings and numbers for auto-literal generation.  Datatypes
+        like dates, intervals, and others may still require manual
+        formatting, typically using :meth:`.Operations.inline_literal`.
+
+     .. note:: The ``literal_binds`` flag is ignored on SQLAlchemy
+        versions prior to 0.8, where this feature is not supported.
+
+     .. seealso::
+
+        :meth:`.Operations.inline_literal`
+
+    :param starting_rev: Override the "starting revision" argument
+     when using ``--sql`` mode.
+    :param tag: a string tag for usage by custom ``env.py`` scripts.
+     Set via the ``--tag`` option, can be overridden here.
+    :param template_args: dictionary of template arguments which
+     will be added to the template argument environment when
+     running the "revision" command.   Note that the script environment
+     is only run within the "revision" command if the ``--autogenerate``
+     option is used, or if the option ``revision_environment = true``
+     is present in the ``alembic.ini`` file.
+
+    :param version_table: The name of the Alembic version table.
+     The default is ``'alembic_version'``.
+    :param version_table_schema: Optional schema to place version
+     table within.
+    :param version_table_pk: boolean, whether the Alembic version table
+     should use a primary key constraint for the "value" column; this
+     only takes effect when the table is first created.
+     Defaults to True; setting to False should not be necessary and is
+     here for backwards compatibility reasons.
+    :param on_version_apply: a callable or collection of callables to be
+        run for each migration step.
+        The callables will be run in the order they are given, once for
+        each migration step, after the respective operation has been
+        applied but before its transaction is finalized.
+        Each callable accepts no positional arguments and the following
+        keyword arguments (a minimal sketch follows the list):
+
+        * ``ctx``: the :class:`.MigrationContext` running the migration,
+        * ``step``: a :class:`.MigrationInfo` representing the
+          step currently being applied,
+        * ``heads``: a collection of version strings representing the
+          current heads,
+        * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`.
+
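+        E.g., a minimal sketch; the callables are invoked with keyword
+        arguments only, and ``step.up_revision_id`` is an assumption
+        based on the documented :class:`.MigrationInfo` attributes::
+
+            def log_step(ctx, step, heads, run_args, **kw):
+                print("applied %s" % step.up_revision_id)
+
+            context.configure(
+                # ...
+                on_version_apply=log_step
+            )
+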
+    Parameters specific to the autogenerate feature, when
+    ``alembic revision`` is run with the ``--autogenerate`` feature:
+
+    :param target_metadata: a :class:`sqlalchemy.schema.MetaData`
+     object, or a sequence of :class:`~sqlalchemy.schema.MetaData`
+     objects, that will be consulted during autogeneration.
+     The tables present in each :class:`~sqlalchemy.schema.MetaData`
+     will be compared against
+     what is locally available on the target
+     :class:`~sqlalchemy.engine.Connection`
+     to produce candidate upgrade/downgrade operations.
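+
+     E.g., a minimal sketch, assuming a declarative ``Base`` in a
+     hypothetical ``myapp.models`` module::
+
+        from myapp.models import Base
+
+        context.configure(
+            # ...
+            target_metadata=Base.metadata
+        )
+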
+    :param compare_type: Indicates type comparison behavior during an
+     autogenerate operation.  Defaults to ``True``, turning on type
+     comparison, which has good accuracy on most backends.  See
+     :ref:`compare_types` for an example as well as information on other
+     type comparison options.  Set to ``False`` to disable type
+     comparison.  A callable can also be passed to provide custom type
+     comparison; see :ref:`compare_types` for additional details.
+
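+     When a callable is passed, it should mirror the annotation shown
+     in the signature above; a minimal sketch::
+
+        def my_compare_type(context, inspected_column,
+                    metadata_column, inspected_type, metadata_type):
+            # return True if the types are different, False if not,
+            # or None to allow the default implementation to compare
+            return None
+
+        context.configure(
+            # ...
+            compare_type=my_compare_type
+        )
+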
+     .. versionchanged:: 1.12.0 The default value of
+        :paramref:`.EnvironmentContext.configure.compare_type` has been
+        changed to ``True``.
+
+     .. seealso::
+
+        :ref:`compare_types`
+
+        :paramref:`.EnvironmentContext.configure.compare_server_default`
+
+    :param compare_server_default: Indicates server default comparison
+     behavior during an autogenerate operation.  Defaults to ``False``,
+     which disables server default comparison.  Set to ``True`` to turn
+     on server default comparison, which has varied accuracy depending
+     on the backend.
+
+     To customize server default comparison behavior, a callable may be
+     specified which can filter server default comparisons during an
+     autogenerate operation.   The format of this callable is::
+
+        def my_compare_server_default(context, inspected_column,
+                    metadata_column, inspected_default, metadata_default,
+                    rendered_metadata_default):
+            # return True if the defaults are different,
+            # False if not, or None to allow the default implementation
+            # to compare these defaults
+            return None
+
+        context.configure(
+            # ...
+            compare_server_default = my_compare_server_default
+        )
+
+     ``inspected_column`` is a dictionary structure as returned by
+     :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
+     ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
+     the local model environment.
+
+     A return value of ``None`` indicates that the default server default
+     comparison should proceed.  Note that some backends such as
+     PostgreSQL actually execute the two defaults on the database side
+     to compare them for equivalence.
+
+     .. seealso::
+
+        :paramref:`.EnvironmentContext.configure.compare_type`
+
+    :param include_name: A callable function which is given
+     the chance to return ``True`` or ``False`` for any database reflected
+     object based on its name, including database schema names when
+     the :paramref:`.EnvironmentContext.configure.include_schemas` flag
+     is set to ``True``.
+
+     The function accepts the following positional arguments:
+
+     * ``name``: the name of the object, such as schema name or table name.
+       Will be ``None`` when indicating the default schema name of the
+       database connection.
+     * ``type``: a string describing the type of object; currently
+       ``"schema"``, ``"table"``, ``"column"``, ``"index"``,
+       ``"unique_constraint"``, or ``"foreign_key_constraint"``
+     * ``parent_names``: a dictionary of "parent" object names, that are
+       relative to the name being given.  Keys in this dictionary may
+       include:  ``"schema_name"``, ``"table_name"`` or
+       ``"schema_qualified_table_name"``.
+
+     E.g.::
+
+        def include_name(name, type_, parent_names):
+            if type_ == "schema":
+                return name in ["schema_one", "schema_two"]
+            else:
+                return True
+
+        context.configure(
+            # ...
+            include_schemas = True,
+            include_name = include_name
+        )
+
+     .. seealso::
+
+        :ref:`autogenerate_include_hooks`
+
+        :paramref:`.EnvironmentContext.configure.include_object`
+
+        :paramref:`.EnvironmentContext.configure.include_schemas`
+
+
+    :param include_object: A callable function which is given
+     the chance to return ``True`` or ``False`` for any object,
+     indicating if the given object should be considered in the
+     autogenerate sweep.
+
+     The function accepts the following positional arguments:
+
+     * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
+       as a :class:`~sqlalchemy.schema.Table`,
+       :class:`~sqlalchemy.schema.Column`,
+       :class:`~sqlalchemy.schema.Index`,
+       :class:`~sqlalchemy.schema.UniqueConstraint`,
+       or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
+     * ``name``: the name of the object. This is typically available
+       via ``object.name``.
+     * ``type``: a string describing the type of object; currently
+       ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
+       or ``"foreign_key_constraint"``
+     * ``reflected``: ``True`` if the given object was produced based on
+       table reflection, ``False`` if it's from a local :class:`.MetaData`
+       object.
+     * ``compare_to``: the object being compared against, if available,
+       else ``None``.
+
+     E.g.::
+
+        def include_object(object, name, type_, reflected, compare_to):
+            if (type_ == "column" and
+                not reflected and
+                object.info.get("skip_autogenerate", False)):
+                return False
+            else:
+                return True
+
+        context.configure(
+            # ...
+            include_object = include_object
+        )
+
+     For the use case of omitting specific schemas from a target database
+     when :paramref:`.EnvironmentContext.configure.include_schemas` is
+     set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema`
+     attribute can be checked for each :class:`~sqlalchemy.schema.Table`
+     object passed to the hook; however, it is much more efficient
+     to filter on schemas before reflection of objects takes place
+     using the :paramref:`.EnvironmentContext.configure.include_name`
+     hook.
+
+     .. seealso::
+
+        :ref:`autogenerate_include_hooks`
+
+        :paramref:`.EnvironmentContext.configure.include_name`
+
+        :paramref:`.EnvironmentContext.configure.include_schemas`
+
+    :param render_as_batch: if True, commands which alter elements
+     within a table will be placed under a ``with batch_alter_table():``
+     directive, so that batch migrations will take place.
+
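+     E.g., generated migrations would then take the form (table and
+     column names illustrative)::
+
+        with op.batch_alter_table("some_table") as batch_op:
+            batch_op.drop_column("old_column")
+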
+     .. seealso::
+
+        :ref:`batch_migrations`
+
+    :param include_schemas: If True, autogenerate will scan across
+     all schemas located by the SQLAlchemy
+     :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
+     method, and include all differences in tables found across all
+     those schemas.  When using this option, you may want to also
+     use the :paramref:`.EnvironmentContext.configure.include_name`
+     parameter to specify a callable which
+     can filter the tables/schemas that get included.
+
+     .. seealso::
+
+        :ref:`autogenerate_include_hooks`
+
+        :paramref:`.EnvironmentContext.configure.include_name`
+
+        :paramref:`.EnvironmentContext.configure.include_object`
+
+    :param render_item: Callable that can be used to override how
+     any schema item, i.e. column, constraint, type,
+     etc., is rendered for autogenerate.  The callable receives a
+     string describing the type of object, the object, and
+     the autogen context.  If it returns ``False``, the
+     default rendering method will be used.  If it returns ``None``,
+     the item will not be rendered in the context of a Table
+     construct; that is, it can be used to skip columns or constraints
+     within ``op.create_table()``::
+
+        def my_render_column(type_, col, autogen_context):
+            if type_ == "column" and isinstance(col, MySpecialCol):
+                return repr(col)
+            else:
+                return False
+
+        context.configure(
+            # ...
+            render_item = my_render_column
+        )
+
+     Available values for the type string include: ``"column"``,
+     ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
+     ``"type"``, ``"server_default"``.
+
+     .. seealso::
+
+        :ref:`autogen_render_types`
+
+    :param upgrade_token: When autogenerate completes, the text of the
+     candidate upgrade operations will be present in this template
+     variable when ``script.py.mako`` is rendered.  Defaults to
+     ``upgrades``.
+    :param downgrade_token: When autogenerate completes, the text of the
+     candidate downgrade operations will be present in this
+     template variable when ``script.py.mako`` is rendered.  Defaults to
+     ``downgrades``.
+
+    :param alembic_module_prefix: When autogenerate refers to Alembic
+     :mod:`alembic.operations` constructs, this prefix will be used
+     (e.g. ``op.create_table``).  Defaults to "``op.``".
+     Can be ``None`` to indicate no prefix.
+
+    :param sqlalchemy_module_prefix: When autogenerate refers to
+     SQLAlchemy
+     :class:`~sqlalchemy.schema.Column` or type classes, this prefix
+     will be used
+     (e.g. ``sa.Column("somename", sa.Integer)``).  Defaults to "``sa.``".
+     Can be ``None`` to indicate no prefix.
+     Note that when dialect-specific types are rendered, autogenerate
+     will render them using the dialect module name, i.e. ``mssql.BIT()``,
+     ``postgresql.UUID()``.
+
+    :param user_module_prefix: When autogenerate refers to a SQLAlchemy
+     type (e.g. :class:`.TypeEngine`) where the module name is not
+     under the ``sqlalchemy`` namespace, this prefix will be used
+     within autogenerate.  If left at its default of
+     ``None``, the ``__module__`` attribute of the type is used to
+     render the import module.   It's a good practice to set this
+     and to have all custom types be available from a fixed module space,
+     in order to future-proof migration files against reorganizations
+     in modules.
+
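+     E.g., a sketch using a hypothetical ``mylib.types`` module::
+
+        context.configure(
+            # ...
+            user_module_prefix="mylib.types."
+        )
+
+     A custom type would then be rendered with that prefix, e.g.
+     ``mylib.types.MyType()``, in generated migration files.
+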
+     .. seealso::
+
+        :ref:`autogen_module_prefix`
+
+    :param process_revision_directives: a callable function that will
+     be passed a structure representing the end result of an autogenerate
+     or plain "revision" operation, which can be manipulated to affect
+     how the ``alembic revision`` command ultimately outputs new
+     revision scripts.   The structure of the callable is::
+
+        def process_revision_directives(context, revision, directives):
+            pass
+
+     The ``directives`` parameter is a Python list containing
+     a single :class:`.MigrationScript` directive, which represents
+     the revision file to be generated.    This list as well as its
+     contents may be freely modified to produce any set of commands.
+     The section :ref:`customizing_revision` shows an example of
+     doing this.  The ``context`` parameter is the
+     :class:`.MigrationContext` in use,
+     and ``revision`` is a tuple of revision identifiers representing the
+     current revision of the database.
+
+     The callable is invoked at all times when the ``--autogenerate``
+     option is passed to ``alembic revision``.  If ``--autogenerate``
+     is not passed, the callable is invoked only if the
+     ``revision_environment`` variable is set to True in the Alembic
+     configuration, in which case the given ``directives`` collection
+     will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps`
+     collections for ``.upgrade_ops`` and ``.downgrade_ops``.  The
+     ``--autogenerate`` option itself can be inferred by inspecting
+     ``context.config.cmd_opts.autogenerate``.
+
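+     E.g., a sketch of the commonly documented "skip empty revisions"
+     recipe (``upgrade_ops.is_empty()`` per the Alembic cookbook)::
+
+        def process_revision_directives(context, revision, directives):
+            if context.config.cmd_opts.autogenerate:
+                script = directives[0]
+                if script.upgrade_ops.is_empty():
+                    directives[:] = []
+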
+     The callable function may optionally be an instance of
+     a :class:`.Rewriter` object.  This is a helper object that
+     assists in the production of autogenerate-stream rewriter functions.
+
+     .. seealso::
+
+         :ref:`customizing_revision`
+
+         :ref:`autogen_rewriter`
+
+         :paramref:`.command.revision.process_revision_directives`
+
+    Parameters specific to individual backends:
+
+    :param mssql_batch_separator: The "batch separator" which will
+     be placed between each statement when generating offline SQL Server
+     migrations.  Defaults to ``GO``.  Note this is in addition to the
+     customary semicolon ``;`` at the end of each statement; SQL Server
+     considers the "batch separator" to denote the end of an
+     individual statement execution, and cannot group certain
+     dependent operations in one step.
+    :param oracle_batch_separator: The "batch separator" which will
+     be placed between each statement when generating offline
+     Oracle migrations.  Defaults to ``/``.  Oracle doesn't add a
+     semicolon between statements like most other backends.
+
+    """
+
+def execute(
+    sql: Union[Executable, str],
+    execution_options: Optional[Dict[str, Any]] = None,
+) -> None:
+    """Execute the given SQL using the current change context.
+
+    The behavior of :meth:`.execute` is the same
+    as that of :meth:`.Operations.execute`.  Please see that
+    function's documentation for full detail including
+    caveats and limitations.
+
+    This function requires that a :class:`.MigrationContext` has
+    first been made available via :meth:`.configure`.
+
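+    E.g. (statement illustrative only)::
+
+        context.execute("UPDATE some_table SET flag = 1")
+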
+    """
+
+def get_bind() -> Connection:
+    """Return the current 'bind'.
+
+    In "online" mode, this is the
+    :class:`sqlalchemy.engine.Connection` currently being used
+    to emit SQL to the database.
+
+    This function requires that a :class:`.MigrationContext`
+    has first been made available via :meth:`.configure`.
+
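+    E.g., a sketch assuming ``sqlalchemy`` is imported as ``sa``::
+
+        connection = context.get_bind()
+        connection.execute(sa.text("SELECT 1"))
+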
+    """
+
+def get_context() -> MigrationContext:
+    """Return the current :class:`.MigrationContext` object.
+
+    If :meth:`.EnvironmentContext.configure` has not been
+    called yet, raises an exception.
+
+    """
+
+def get_head_revision() -> Union[str, Tuple[str, ...], None]:
+    """Return the hex identifier of the 'head' script revision.
+
+    If the script directory has multiple heads, this
+    method raises a :class:`.CommandError`;
+    :meth:`.EnvironmentContext.get_head_revisions` should be preferred.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    .. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
+
+    """
+
+def get_head_revisions() -> Union[str, Tuple[str, ...], None]:
+    """Return the hex identifier of the 'heads' script revision(s).
+
+    This returns a tuple containing the version numbers of all
+    heads in the script directory.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    """
+
+def get_revision_argument() -> Union[str, Tuple[str, ...], None]:
+    """Get the 'destination' revision argument.
+
+    This is typically the argument passed to the
+    ``upgrade`` or ``downgrade`` command.
+
+    If it was specified as ``head``, the actual
+    version number is returned; if specified
+    as ``base``, ``None`` is returned.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    """
+
+def get_starting_revision_argument() -> Union[str, Tuple[str, ...], None]:
+    """Return the 'starting revision' argument,
+    if the revision was passed using ``start:end``.
+
+    This is only meaningful in "offline" mode.
+    Returns ``None`` if no value is available
+    or was configured.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    """
+
+def get_tag_argument() -> Optional[str]:
+    """Return the value passed for the ``--tag`` argument, if any.
+
+    The ``--tag`` argument is not used directly by Alembic,
+    but is available for custom ``env.py`` configurations that
+    wish to use it; particularly for offline generation scripts
+    that wish to generate tagged filenames.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    .. seealso::
+
+        :meth:`.EnvironmentContext.get_x_argument` - a newer and more
+        open ended system of extending ``env.py`` scripts via the command
+        line.
+
+    """
+
+@overload
+def get_x_argument(as_dictionary: Literal[False]) -> List[str]: ...
+@overload
+def get_x_argument(as_dictionary: Literal[True]) -> Dict[str, str]: ...
+@overload
+def get_x_argument(
+    as_dictionary: bool = ...,
+) -> Union[List[str], Dict[str, str]]:
+    """Return the value(s) passed for the ``-x`` argument, if any.
+
+    The ``-x`` argument is an open ended flag that allows any user-defined
+    value or values to be passed on the command line, then available
+    here for consumption by a custom ``env.py`` script.
+
+    The return value is a list, returned directly from the ``argparse``
+    structure.  If ``as_dictionary=True`` is passed, the ``x`` arguments
+    are parsed using ``key=value`` format into a dictionary that is
+    then returned. If there is no ``=`` in the argument, its value is
+    an empty string.
+
+    .. versionchanged:: 1.13.1 Support ``as_dictionary=True`` when
+       arguments are passed without the ``=`` symbol.
+
+    For example, to support passing a database URL on the command line,
+    the standard ``env.py`` script can be modified like this::
+
+        cmd_line_url = context.get_x_argument(
+            as_dictionary=True).get('dbname')
+        if cmd_line_url:
+            engine = create_engine(cmd_line_url)
+        else:
+            engine = engine_from_config(
+                    config.get_section(config.config_ini_section),
+                    prefix='sqlalchemy.',
+                    poolclass=pool.NullPool)
+
+    This then takes effect by running the ``alembic`` script as::
+
+        alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
+    .. seealso::
+
+        :meth:`.EnvironmentContext.get_tag_argument`
+
+        :attr:`.Config.cmd_opts`
+
+    """
+
+def is_offline_mode() -> bool:
+    """Return True if the current migrations environment
+    is running in "offline mode".
+
+    This is ``True`` or ``False`` depending
+    on the ``--sql`` flag passed.
+
+    This function does not require that the :class:`.MigrationContext`
+    has been configured.
+
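+    E.g., the dispatch used by the standard ``env.py`` template::
+
+        if context.is_offline_mode():
+            run_migrations_offline()
+        else:
+            run_migrations_online()
+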
+    """
+
+def is_transactional_ddl() -> bool:
+    """Return True if the context is configured to expect a
+    transactional DDL capable backend.
+
+    This defaults to the type of database in use, and
+    can be overridden by the ``transactional_ddl`` argument
+    to :meth:`.configure`.
+
+    This function requires that a :class:`.MigrationContext`
+    has first been made available via :meth:`.configure`.
+
+    """
+
+def run_migrations(**kw: Any) -> None:
+    """Run migrations as determined by the current command line
+    configuration
+    as well as versioning information present (or not) in the current
+    database connection (if one is present).
+
+    The function accepts optional ``**kw`` arguments.   If these are
+    passed, they are sent directly to the ``upgrade()`` and
+    ``downgrade()``
+    functions within each target revision file.   By modifying the
+    ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
+    functions accept arguments, parameters can be passed here so that
+    contextual information, usually information to identify a particular
+    database in use, can be passed from a custom ``env.py`` script
+    to the migration functions.
+
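+    E.g., a sketch where ``upgrade()`` and ``downgrade()`` have been
+    modified to accept a hypothetical ``engine_name`` parameter::
+
+        context.run_migrations(engine_name="engine1")
+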
+    This function requires that a :class:`.MigrationContext` has
+    first been made available via :meth:`.configure`.
+
+    """
+
+script: ScriptDirectory
+
+def static_output(text: str) -> None:
+    """Emit text directly to the "offline" SQL stream.
+
+    Typically this is for emitting comments that
+    start with ``--``.  The statement is not treated
+    as a SQL execution; no ``;`` or batch separator
+    is added, etc.
+
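+    E.g.::
+
+        context.static_output("-- ran by custom env.py script")
+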
+    """
diff --git a/venv/Lib/site-packages/alembic/ddl/__init__.py b/venv/Lib/site-packages/alembic/ddl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2f72b3dd8d3748b36cb7acfcda7abf8468b6926
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/__init__.py
@@ -0,0 +1,6 @@
+from . import mssql
+from . import mysql
+from . import oracle
+from . import postgresql
+from . import sqlite
+from .impl import DefaultImpl as DefaultImpl
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38564cd537db75ebeac9a8f584e3eae4a2a9221d
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/_autogen.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/_autogen.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..426a0c4f68a2a5f20239837c1d8632d0221cf7e0
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/_autogen.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/base.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1956e6499af0a890b56b8074205bca09bd85674
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/base.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/impl.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/impl.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c6f8fe52fb89f24c12f034f0479fe29e346ff3b
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/impl.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/mssql.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/mssql.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1eedc835ee3f96b8b8b1d36ba1abc101fd20138
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/mssql.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/mysql.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/mysql.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e0ae8eee5aba3a48f02d4fa51e5be65dcb19d25
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/mysql.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/oracle.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/oracle.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21a3fa49572dc1a8f76a1f6fc1ceaf9082823a29
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/oracle.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/postgresql.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/postgresql.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04d0b930bd59510c3f612f733e0d0343ba4f5c56
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/postgresql.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/__pycache__/sqlite.cpython-311.pyc b/venv/Lib/site-packages/alembic/ddl/__pycache__/sqlite.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67f2774a6fe5baf68c1de146a9b4f0e2e3a4f04c
Binary files /dev/null and b/venv/Lib/site-packages/alembic/ddl/__pycache__/sqlite.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/ddl/_autogen.py b/venv/Lib/site-packages/alembic/ddl/_autogen.py
new file mode 100644
index 0000000000000000000000000000000000000000..e22153c49c761451c074c11de6c7ea53d20c1149
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/_autogen.py
@@ -0,0 +1,325 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from typing import Any
+from typing import ClassVar
+from typing import Dict
+from typing import Generic
+from typing import NamedTuple
+from typing import Optional
+from typing import Sequence
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy.sql.schema import Constraint
+from sqlalchemy.sql.schema import ForeignKeyConstraint
+from sqlalchemy.sql.schema import Index
+from sqlalchemy.sql.schema import UniqueConstraint
+from typing_extensions import TypeGuard
+
+from .. import util
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from alembic.autogenerate.api import AutogenContext
+    from alembic.ddl.impl import DefaultImpl
+
+CompareConstraintType = Union[Constraint, Index]
+
+_C = TypeVar("_C", bound=CompareConstraintType)
+
+_clsreg: Dict[str, Type[_constraint_sig]] = {}
+
+
+class ComparisonResult(NamedTuple):
+    status: Literal["equal", "different", "skip"]
+    message: str
+
+    @property
+    def is_equal(self) -> bool:
+        return self.status == "equal"
+
+    @property
+    def is_different(self) -> bool:
+        return self.status == "different"
+
+    @property
+    def is_skip(self) -> bool:
+        return self.status == "skip"
+
+    @classmethod
+    def Equal(cls) -> ComparisonResult:
+        """the constraints are equal."""
+        return cls("equal", "The two constraints are equal")
+
+    @classmethod
+    def Different(cls, reason: Union[str, Sequence[str]]) -> ComparisonResult:
+        """the constraints are different for the provided reason(s)."""
+        return cls("different", ", ".join(util.to_list(reason)))
+
+    @classmethod
+    def Skip(cls, reason: Union[str, Sequence[str]]) -> ComparisonResult:
+        """the constraint cannot be compared for the provided reason(s).
+
+        The message is logged, but the constraints will be otherwise
+        considered equal, meaning that no migration command will be
+        generated.
+        """
+        return cls("skip", ", ".join(util.to_list(reason)))
+
+
+class _constraint_sig(Generic[_C]):
+    const: _C
+
+    _sig: Tuple[Any, ...]
+    name: Optional[sqla_compat._ConstraintNameDefined]
+
+    impl: DefaultImpl
+
+    _is_index: ClassVar[bool] = False
+    _is_fk: ClassVar[bool] = False
+    _is_uq: ClassVar[bool] = False
+
+    _is_metadata: bool
+
+    def __init_subclass__(cls) -> None:
+        cls._register()
+
+    @classmethod
+    def _register(cls):
+        raise NotImplementedError()
+
+    def __init__(
+        self, is_metadata: bool, impl: DefaultImpl, const: _C
+    ) -> None:
+        raise NotImplementedError()
+
+    def compare_to_reflected(
+        self, other: _constraint_sig[Any]
+    ) -> ComparisonResult:
+        assert self.impl is other.impl
+        assert self._is_metadata
+        assert not other._is_metadata
+
+        return self._compare_to_reflected(other)
+
+    def _compare_to_reflected(
+        self, other: _constraint_sig[_C]
+    ) -> ComparisonResult:
+        raise NotImplementedError()
+
+    @classmethod
+    def from_constraint(
+        cls, is_metadata: bool, impl: DefaultImpl, constraint: _C
+    ) -> _constraint_sig[_C]:
+        # these could be cached by constraint/impl; however, if the
+        # constraint is modified in place, then the sig is wrong.  the mysql
+        # impl currently does this, and even if that were fixed, another
+        # impl might do the same, so play it safe.
+        sig = _clsreg[constraint.__visit_name__](is_metadata, impl, constraint)
+        return sig
+
+    def md_name_to_sql_name(self, context: AutogenContext) -> Optional[str]:
+        return sqla_compat._get_constraint_final_name(
+            self.const, context.dialect
+        )
+
+    @util.memoized_property
+    def is_named(self):
+        return sqla_compat._constraint_is_named(self.const, self.impl.dialect)
+
+    @util.memoized_property
+    def unnamed(self) -> Tuple[Any, ...]:
+        return self._sig
+
+    @util.memoized_property
+    def unnamed_no_options(self) -> Tuple[Any, ...]:
+        raise NotImplementedError()
+
+    @util.memoized_property
+    def _full_sig(self) -> Tuple[Any, ...]:
+        return (self.name,) + self.unnamed
+
+    def __eq__(self, other) -> bool:
+        return self._full_sig == other._full_sig
+
+    def __ne__(self, other) -> bool:
+        return self._full_sig != other._full_sig
+
+    def __hash__(self) -> int:
+        return hash(self._full_sig)
+
+
+class _uq_constraint_sig(_constraint_sig[UniqueConstraint]):
+    _is_uq = True
+
+    @classmethod
+    def _register(cls) -> None:
+        _clsreg["unique_constraint"] = cls
+
+    is_unique = True
+
+    def __init__(
+        self,
+        is_metadata: bool,
+        impl: DefaultImpl,
+        const: UniqueConstraint,
+    ) -> None:
+        self.impl = impl
+        self.const = const
+        self.name = sqla_compat.constraint_name_or_none(const.name)
+        self._sig = tuple(sorted([col.name for col in const.columns]))
+        self._is_metadata = is_metadata
+
+    @property
+    def column_names(self) -> Tuple[str, ...]:
+        return tuple([col.name for col in self.const.columns])
+
+    def _compare_to_reflected(
+        self, other: _constraint_sig[_C]
+    ) -> ComparisonResult:
+        assert self._is_metadata
+        metadata_obj = self
+        conn_obj = other
+
+        assert is_uq_sig(conn_obj)
+        return self.impl.compare_unique_constraint(
+            metadata_obj.const, conn_obj.const
+        )
+
+
+class _ix_constraint_sig(_constraint_sig[Index]):
+    _is_index = True
+
+    name: sqla_compat._ConstraintName
+
+    @classmethod
+    def _register(cls) -> None:
+        _clsreg["index"] = cls
+
+    def __init__(
+        self, is_metadata: bool, impl: DefaultImpl, const: Index
+    ) -> None:
+        self.impl = impl
+        self.const = const
+        self.name = const.name
+        self.is_unique = bool(const.unique)
+        self._is_metadata = is_metadata
+
+    def _compare_to_reflected(
+        self, other: _constraint_sig[_C]
+    ) -> ComparisonResult:
+        assert self._is_metadata
+        metadata_obj = self
+        conn_obj = other
+
+        assert is_index_sig(conn_obj)
+        return self.impl.compare_indexes(metadata_obj.const, conn_obj.const)
+
+    @util.memoized_property
+    def has_expressions(self):
+        return sqla_compat.is_expression_index(self.const)
+
+    @util.memoized_property
+    def column_names(self) -> Tuple[str, ...]:
+        return tuple([col.name for col in self.const.columns])
+
+    @util.memoized_property
+    def column_names_optional(self) -> Tuple[Optional[str], ...]:
+        return tuple(
+            [getattr(col, "name", None) for col in self.const.expressions]
+        )
+
+    @util.memoized_property
+    def is_named(self):
+        return True
+
+    @util.memoized_property
+    def unnamed(self):
+        return (self.is_unique,) + self.column_names_optional
+
+
+class _fk_constraint_sig(_constraint_sig[ForeignKeyConstraint]):
+    _is_fk = True
+
+    @classmethod
+    def _register(cls) -> None:
+        _clsreg["foreign_key_constraint"] = cls
+
+    def __init__(
+        self,
+        is_metadata: bool,
+        impl: DefaultImpl,
+        const: ForeignKeyConstraint,
+    ) -> None:
+        self._is_metadata = is_metadata
+
+        self.impl = impl
+        self.const = const
+
+        self.name = sqla_compat.constraint_name_or_none(const.name)
+
+        (
+            self.source_schema,
+            self.source_table,
+            self.source_columns,
+            self.target_schema,
+            self.target_table,
+            self.target_columns,
+            onupdate,
+            ondelete,
+            deferrable,
+            initially,
+        ) = sqla_compat._fk_spec(const)
+
+        self._sig: Tuple[Any, ...] = (
+            self.source_schema,
+            self.source_table,
+            tuple(self.source_columns),
+            self.target_schema,
+            self.target_table,
+            tuple(self.target_columns),
+        ) + (
+            (None if onupdate.lower() == "no action" else onupdate.lower())
+            if onupdate
+            else None,
+            (None if ondelete.lower() == "no action" else ondelete.lower())
+            if ondelete
+            else None,
+            # convert initially + deferrable into one three-state value
+            "initially_deferrable"
+            if initially and initially.lower() == "deferred"
+            else "deferrable"
+            if deferrable
+            else "not deferrable",
+        )
+
+    @util.memoized_property
+    def unnamed_no_options(self):
+        return (
+            self.source_schema,
+            self.source_table,
+            tuple(self.source_columns),
+            self.target_schema,
+            self.target_table,
+            tuple(self.target_columns),
+        )
+
+
+def is_index_sig(sig: _constraint_sig) -> TypeGuard[_ix_constraint_sig]:
+    return sig._is_index
+
+
+def is_uq_sig(sig: _constraint_sig) -> TypeGuard[_uq_constraint_sig]:
+    return sig._is_uq
+
+
+def is_fk_sig(sig: _constraint_sig) -> TypeGuard[_fk_constraint_sig]:
+    return sig._is_fk
diff --git a/venv/Lib/site-packages/alembic/ddl/base.py b/venv/Lib/site-packages/alembic/ddl/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a85a5c198affa8f50fcfe4da126836627ae472c
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/base.py
@@ -0,0 +1,335 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import functools
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import exc
+from sqlalchemy import Integer
+from sqlalchemy import types as sqltypes
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import DDLElement
+from sqlalchemy.sql.elements import quoted_name
+
+from ..util.sqla_compat import _columns_for_constraint  # noqa
+from ..util.sqla_compat import _find_columns  # noqa
+from ..util.sqla_compat import _fk_spec  # noqa
+from ..util.sqla_compat import _is_type_bound  # noqa
+from ..util.sqla_compat import _table_for_constraint  # noqa
+
+if TYPE_CHECKING:
+    from typing import Any
+
+    from sqlalchemy.sql.compiler import Compiled
+    from sqlalchemy.sql.compiler import DDLCompiler
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.functions import Function
+    from sqlalchemy.sql.schema import FetchedValue
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .impl import DefaultImpl
+    from ..util.sqla_compat import Computed
+    from ..util.sqla_compat import Identity
+
+_ServerDefault = Union["TextClause", "FetchedValue", "Function[Any]", str]
+
+
+class AlterTable(DDLElement):
+
+    """Represent an ALTER TABLE statement.
+
+    Only the string name and optional schema name of the table
+    are required, not a full Table object.
+
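+    E.g. (names illustrative)::
+
+        AlterTable("some_table", schema="some_schema")
+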
+    """
+
+    def __init__(
+        self,
+        table_name: str,
+        schema: Optional[Union[quoted_name, str]] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.schema = schema
+
+
+class RenameTable(AlterTable):
+    def __init__(
+        self,
+        old_table_name: str,
+        new_table_name: Union[quoted_name, str],
+        schema: Optional[Union[quoted_name, str]] = None,
+    ) -> None:
+        super().__init__(old_table_name, schema=schema)
+        self.new_table_name = new_table_name
+
+
+class AlterColumn(AlterTable):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        schema: Optional[str] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_nullable: Optional[bool] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        super().__init__(name, schema=schema)
+        self.column_name = column_name
+        self.existing_type = (
+            sqltypes.to_instance(existing_type)
+            if existing_type is not None
+            else None
+        )
+        self.existing_nullable = existing_nullable
+        self.existing_server_default = existing_server_default
+        self.existing_comment = existing_comment
+
+
+class ColumnNullable(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, nullable: bool, **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.nullable = nullable
+
+
+class ColumnType(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, type_: TypeEngine, **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.type_ = sqltypes.to_instance(type_)
+
+
+class ColumnName(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, newname: str, **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.newname = newname
+
+
+class ColumnDefault(AlterColumn):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        default: Optional[_ServerDefault],
+        **kw,
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.default = default
+
+
+class ComputedColumnDefault(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, default: Optional[Computed], **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.default = default
+
+
+class IdentityColumnDefault(AlterColumn):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        default: Optional[Identity],
+        impl: DefaultImpl,
+        **kw,
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.default = default
+        self.impl = impl
+
+
+class AddColumn(AlterTable):
+    def __init__(
+        self,
+        name: str,
+        column: Column[Any],
+        schema: Optional[Union[quoted_name, str]] = None,
+    ) -> None:
+        super().__init__(name, schema=schema)
+        self.column = column
+
+
+class DropColumn(AlterTable):
+    def __init__(
+        self, name: str, column: Column[Any], schema: Optional[str] = None
+    ) -> None:
+        super().__init__(name, schema=schema)
+        self.column = column
+
+
+class ColumnComment(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, comment: Optional[str], **kw
+    ) -> None:
+        super().__init__(name, column_name, **kw)
+        self.comment = comment
+
+
+@compiles(RenameTable)  # type: ignore[misc]
+def visit_rename_table(
+    element: RenameTable, compiler: DDLCompiler, **kw
+) -> str:
+    return "%s RENAME TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, element.schema),
+    )
+
+
+@compiles(AddColumn)  # type: ignore[misc]
+def visit_add_column(element: AddColumn, compiler: DDLCompiler, **kw) -> str:
+    return "%s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        add_column(compiler, element.column, **kw),
+    )
+
+
+@compiles(DropColumn)  # type: ignore[misc]
+def visit_drop_column(element: DropColumn, compiler: DDLCompiler, **kw) -> str:
+    return "%s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        drop_column(compiler, element.column.name, **kw),
+    )
+
+
+@compiles(ColumnNullable)  # type: ignore[misc]
+def visit_column_nullable(
+    element: ColumnNullable, compiler: DDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "DROP NOT NULL" if element.nullable else "SET NOT NULL",
+    )
+
+
+@compiles(ColumnType)  # type: ignore[misc]
+def visit_column_type(element: ColumnType, compiler: DDLCompiler, **kw) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "TYPE %s" % format_type(compiler, element.type_),
+    )
+
+
+@compiles(ColumnName)  # type: ignore[misc]
+def visit_column_name(element: ColumnName, compiler: DDLCompiler, **kw) -> str:
+    return "%s RENAME %s TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+    )
+
+
+@compiles(ColumnDefault)  # type: ignore[misc]
+def visit_column_default(
+    element: ColumnDefault, compiler: DDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "SET DEFAULT %s" % format_server_default(compiler, element.default)
+        if element.default is not None
+        else "DROP DEFAULT",
+    )
+
+
+@compiles(ComputedColumnDefault)  # type: ignore[misc]
+def visit_computed_column(
+    element: ComputedColumnDefault, compiler: DDLCompiler, **kw
+):
+    raise exc.CompileError(
+        'Adding or removing a "computed" construct, e.g. GENERATED '
+        "ALWAYS AS, to or from an existing column is not supported."
+    )
+
+
+@compiles(IdentityColumnDefault)  # type: ignore[misc]
+def visit_identity_column(
+    element: IdentityColumnDefault, compiler: DDLCompiler, **kw
+):
+    raise exc.CompileError(
+        'Adding, removing or modifying an "identity" construct, '
+        "e.g. GENERATED AS IDENTITY, to or from an existing "
+        "column is not supported in this dialect."
+    )
+
+
+def quote_dotted(
+    name: Union[quoted_name, str], quote: functools.partial
+) -> Union[quoted_name, str]:
+    """quote the elements of a dotted name"""
+
+    if isinstance(name, quoted_name):
+        return quote(name)
+    result = ".".join([quote(x) for x in name.split(".")])
+    return result
+
+
+def format_table_name(
+    compiler: Compiled,
+    name: Union[quoted_name, str],
+    schema: Optional[Union[quoted_name, str]],
+) -> Union[quoted_name, str]:
+    quote = functools.partial(compiler.preparer.quote)
+    if schema:
+        return quote_dotted(schema, quote) + "." + quote(name)
+    else:
+        return quote(name)
+
+
+def format_column_name(
+    compiler: DDLCompiler, name: Optional[Union[quoted_name, str]]
+) -> Union[quoted_name, str]:
+    return compiler.preparer.quote(name)  # type: ignore[arg-type]
+
+
+def format_server_default(
+    compiler: DDLCompiler,
+    default: Optional[_ServerDefault],
+) -> str:
+    return compiler.get_column_default_string(
+        Column("x", Integer, server_default=default)
+    )
+
+
+def format_type(compiler: DDLCompiler, type_: TypeEngine) -> str:
+    return compiler.dialect.type_compiler.process(type_)
+
+
+def alter_table(
+    compiler: DDLCompiler,
+    name: str,
+    schema: Optional[str],
+) -> str:
+    return "ALTER TABLE %s" % format_table_name(compiler, name, schema)
+
+
+def drop_column(compiler: DDLCompiler, name: str, **kw) -> str:
+    return "DROP COLUMN %s" % format_column_name(compiler, name)
+
+
+def alter_column(compiler: DDLCompiler, name: str) -> str:
+    return "ALTER COLUMN %s" % format_column_name(compiler, name)
+
+
+def add_column(compiler: DDLCompiler, column: Column[Any], **kw) -> str:
+    text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
+
+    const = " ".join(
+        compiler.process(constraint) for constraint in column.constraints
+    )
+    if const:
+        text += " " + const
+
+    return text
diff --git a/venv/Lib/site-packages/alembic/ddl/impl.py b/venv/Lib/site-packages/alembic/ddl/impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e4f1ae9405eac6c755f3f4f3957efa717ecd8da
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/impl.py
@@ -0,0 +1,844 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import logging
+import re
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Iterable
+from typing import List
+from typing import Mapping
+from typing import NamedTuple
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import cast
+from sqlalchemy import schema
+from sqlalchemy import text
+
+from . import _autogen
+from . import base
+from ._autogen import _constraint_sig as _constraint_sig
+from ._autogen import ComparisonResult as ComparisonResult
+from .. import util
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+    from typing import TextIO
+
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.engine.cursor import CursorResult
+    from sqlalchemy.engine.reflection import Inspector
+    from sqlalchemy.sql import ClauseElement
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import ForeignKeyConstraint
+    from sqlalchemy.sql.schema import Index
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.schema import UniqueConstraint
+    from sqlalchemy.sql.selectable import TableClause
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .base import _ServerDefault
+    from ..autogenerate.api import AutogenContext
+    from ..operations.batch import ApplyBatchImpl
+    from ..operations.batch import BatchOperationsImpl
+
+log = logging.getLogger(__name__)
+
+
+class ImplMeta(type):
+    def __init__(
+        cls,
+        classname: str,
+        bases: Tuple[Type[DefaultImpl]],
+        dict_: Dict[str, Any],
+    ):
+        newtype = type.__init__(cls, classname, bases, dict_)
+        if "__dialect__" in dict_:
+            _impls[dict_["__dialect__"]] = cls  # type: ignore[assignment]
+        return newtype
+
+
+_impls: Dict[str, Type[DefaultImpl]] = {}
+
+
+class DefaultImpl(metaclass=ImplMeta):
+
+    """Provide the entrypoint for major migration operations,
+    including database-specific behavioral variances.
+
+    While individual SQL/DDL constructs already provide
+    for database-specific implementations, variances here
+    allow for entirely different sequences of operations
+    to take place for a particular migration, such as
+    SQL Server's special 'IDENTITY INSERT' step for
+    bulk inserts.
+
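+    The implementation class for a given dialect may be looked up
+    via ``get_by_dialect()``, e.g.::
+
+        impl_cls = DefaultImpl.get_by_dialect(connection.dialect)
+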
+    """
+
+    __dialect__ = "default"
+
+    transactional_ddl = False
+    command_terminator = ";"
+    type_synonyms: Tuple[Set[str], ...] = ({"NUMERIC", "DECIMAL"},)
+    type_arg_extract: Sequence[str] = ()
+    # These attributes are deprecated in SQLAlchemy via #10247. They need to
+    # be ignored to support older versions that did not use dialect kwargs.
+    # They only apply to Oracle and are replaced by oracle_order,
+    # oracle_on_null
+    identity_attrs_ignore: Tuple[str, ...] = ("order", "on_null")
+
+    def __init__(
+        self,
+        dialect: Dialect,
+        connection: Optional[Connection],
+        as_sql: bool,
+        transactional_ddl: Optional[bool],
+        output_buffer: Optional[TextIO],
+        context_opts: Dict[str, Any],
+    ) -> None:
+        self.dialect = dialect
+        self.connection = connection
+        self.as_sql = as_sql
+        self.literal_binds = context_opts.get("literal_binds", False)
+
+        self.output_buffer = output_buffer
+        self.memo: dict = {}
+        self.context_opts = context_opts
+        if transactional_ddl is not None:
+            self.transactional_ddl = transactional_ddl
+
+        if self.literal_binds:
+            if not self.as_sql:
+                raise util.CommandError(
+                    "Can't use literal_binds setting without as_sql mode"
+                )
+
+    @classmethod
+    def get_by_dialect(cls, dialect: Dialect) -> Type[DefaultImpl]:
+        return _impls[dialect.name]
+
+    def static_output(self, text: str) -> None:
+        assert self.output_buffer is not None
+        self.output_buffer.write(text + "\n\n")
+        self.output_buffer.flush()
+
+    def requires_recreate_in_batch(
+        self, batch_op: BatchOperationsImpl
+    ) -> bool:
+        """Return True if the given :class:`.BatchOperationsImpl`
+        would need the table to be recreated and copied in order to
+        proceed.
+
+        Normally, only returns True on SQLite when operations other
+        than add_column are present.
+
+        """
+        return False
+
+    def prep_table_for_batch(
+        self, batch_impl: ApplyBatchImpl, table: Table
+    ) -> None:
+        """perform any operations needed on a table before a new
+        one is created to replace it in batch mode.
+
+        the PG dialect uses this to drop constraints on the table
+        before the new one uses those same names.
+
+        """
+
+    @property
+    def bind(self) -> Optional[Connection]:
+        return self.connection
+
+    def _exec(
+        self,
+        construct: Union[Executable, str],
+        execution_options: Optional[dict[str, Any]] = None,
+        multiparams: Sequence[dict] = (),
+        params: Dict[str, Any] = util.immutabledict(),
+    ) -> Optional[CursorResult]:
+        if isinstance(construct, str):
+            construct = text(construct)
+        if self.as_sql:
+            if multiparams or params:
+                # TODO: coverage
+                raise Exception("Execution arguments not allowed with as_sql")
+
+            compile_kw: dict[str, Any]
+            if self.literal_binds and not isinstance(
+                construct, schema.DDLElement
+            ):
+                compile_kw = dict(compile_kwargs={"literal_binds": True})
+            else:
+                compile_kw = {}
+
+            if TYPE_CHECKING:
+                assert isinstance(construct, ClauseElement)
+            compiled = construct.compile(dialect=self.dialect, **compile_kw)
+            self.static_output(
+                str(compiled).replace("\t", "    ").strip()
+                + self.command_terminator
+            )
+            return None
+        else:
+            conn = self.connection
+            assert conn is not None
+            if execution_options:
+                conn = conn.execution_options(**execution_options)
+            if params:
+                assert isinstance(multiparams, tuple)
+                multiparams += (params,)
+
+            return conn.execute(construct, multiparams)
+
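+    # Illustrative sketch of the two paths above: with ``as_sql=True``
+    # (offline "--sql" mode), a construct such as ``text("DROP TABLE t")``
+    # (hypothetical table name) is compiled and written to the output buffer
+    # as ``DROP TABLE t;`` using ``command_terminator``; with a live
+    # connection, it is executed directly via ``conn.execute()``.
+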
+    def execute(
+        self,
+        sql: Union[Executable, str],
+        execution_options: Optional[dict[str, Any]] = None,
+    ) -> None:
+        self._exec(sql, execution_options)
+
+    def alter_column(
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Union[_ServerDefault, Literal[False]] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        schema: Optional[str] = None,
+        autoincrement: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+        existing_comment: Optional[str] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_nullable: Optional[bool] = None,
+        existing_autoincrement: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        if autoincrement is not None or existing_autoincrement is not None:
+            util.warn(
+                "autoincrement and existing_autoincrement "
+                "only make sense for MySQL",
+                stacklevel=3,
+            )
+        if nullable is not None:
+            self._exec(
+                base.ColumnNullable(
+                    table_name,
+                    column_name,
+                    nullable,
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                    existing_comment=existing_comment,
+                )
+            )
+        if server_default is not False:
+            kw = {}
+            cls_: Type[
+                Union[
+                    base.ComputedColumnDefault,
+                    base.IdentityColumnDefault,
+                    base.ColumnDefault,
+                ]
+            ]
+            if sqla_compat._server_default_is_computed(
+                server_default, existing_server_default
+            ):
+                cls_ = base.ComputedColumnDefault
+            elif sqla_compat._server_default_is_identity(
+                server_default, existing_server_default
+            ):
+                cls_ = base.IdentityColumnDefault
+                kw["impl"] = self
+            else:
+                cls_ = base.ColumnDefault
+            self._exec(
+                cls_(
+                    table_name,
+                    column_name,
+                    server_default,  # type:ignore[arg-type]
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                    existing_comment=existing_comment,
+                    **kw,
+                )
+            )
+        if type_ is not None:
+            self._exec(
+                base.ColumnType(
+                    table_name,
+                    column_name,
+                    type_,
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                    existing_comment=existing_comment,
+                )
+            )
+
+        if comment is not False:
+            self._exec(
+                base.ColumnComment(
+                    table_name,
+                    column_name,
+                    comment,
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                    existing_comment=existing_comment,
+                )
+            )
+
+        # do the new name last ;)
+        if name is not None:
+            self._exec(
+                base.ColumnName(
+                    table_name,
+                    column_name,
+                    name,
+                    schema=schema,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                )
+            )
+
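+    # Illustrative sketch (hypothetical names): a call such as
+    #
+    #     impl.alter_column("t", "c", nullable=False, type_=Integer(),
+    #                       name="c2")
+    #
+    # emits separate constructs in the order coded above: ColumnNullable,
+    # then ColumnType, then ColumnName last.
+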
+    def add_column(
+        self,
+        table_name: str,
+        column: Column[Any],
+        schema: Optional[Union[str, quoted_name]] = None,
+    ) -> None:
+        self._exec(base.AddColumn(table_name, column, schema=schema))
+
+    def drop_column(
+        self,
+        table_name: str,
+        column: Column[Any],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> None:
+        self._exec(base.DropColumn(table_name, column, schema=schema))
+
+    def add_constraint(self, const: Any) -> None:
+        if const._create_rule is None or const._create_rule(self):
+            self._exec(schema.AddConstraint(const))
+
+    def drop_constraint(self, const: Constraint) -> None:
+        self._exec(schema.DropConstraint(const))
+
+    def rename_table(
+        self,
+        old_table_name: str,
+        new_table_name: Union[str, quoted_name],
+        schema: Optional[Union[str, quoted_name]] = None,
+    ) -> None:
+        self._exec(
+            base.RenameTable(old_table_name, new_table_name, schema=schema)
+        )
+
+    def create_table(self, table: Table) -> None:
+        table.dispatch.before_create(
+            table, self.connection, checkfirst=False, _ddl_runner=self
+        )
+        self._exec(schema.CreateTable(table))
+        table.dispatch.after_create(
+            table, self.connection, checkfirst=False, _ddl_runner=self
+        )
+        for index in table.indexes:
+            self._exec(schema.CreateIndex(index))
+
+        with_comment = (
+            self.dialect.supports_comments and not self.dialect.inline_comments
+        )
+        comment = table.comment
+        if comment and with_comment:
+            self.create_table_comment(table)
+
+        for column in table.columns:
+            comment = column.comment
+            if comment and with_comment:
+                self.create_column_comment(column)
+
+    def drop_table(self, table: Table) -> None:
+        table.dispatch.before_drop(
+            table, self.connection, checkfirst=False, _ddl_runner=self
+        )
+        self._exec(schema.DropTable(table))
+        table.dispatch.after_drop(
+            table, self.connection, checkfirst=False, _ddl_runner=self
+        )
+
+    def create_index(self, index: Index, **kw: Any) -> None:
+        self._exec(schema.CreateIndex(index, **kw))
+
+    def create_table_comment(self, table: Table) -> None:
+        self._exec(schema.SetTableComment(table))
+
+    def drop_table_comment(self, table: Table) -> None:
+        self._exec(schema.DropTableComment(table))
+
+    def create_column_comment(self, column: ColumnElement[Any]) -> None:
+        self._exec(schema.SetColumnComment(column))
+
+    def drop_index(self, index: Index, **kw: Any) -> None:
+        self._exec(schema.DropIndex(index, **kw))
+
+    def bulk_insert(
+        self,
+        table: Union[TableClause, Table],
+        rows: List[dict],
+        multiinsert: bool = True,
+    ) -> None:
+        if not isinstance(rows, list):
+            raise TypeError("List expected")
+        elif rows and not isinstance(rows[0], dict):
+            raise TypeError("List of dictionaries expected")
+        if self.as_sql:
+            for row in rows:
+                self._exec(
+                    sqla_compat._insert_inline(table).values(
+                        **{
+                            k: sqla_compat._literal_bindparam(
+                                k, v, type_=table.c[k].type
+                            )
+                            if not isinstance(
+                                v, sqla_compat._literal_bindparam
+                            )
+                            else v
+                            for k, v in row.items()
+                        }
+                    )
+                )
+        else:
+            if rows:
+                if multiinsert:
+                    self._exec(
+                        sqla_compat._insert_inline(table), multiparams=rows
+                    )
+                else:
+                    for row in rows:
+                        self._exec(
+                            sqla_compat._insert_inline(table).values(**row)
+                        )
+
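+    # Illustrative sketch (hypothetical table "accounts"):
+    #
+    #     impl.bulk_insert(accounts, [{"id": 1, "name": "alice"}])
+    #
+    # renders one INSERT per row with literal values in SQL-output mode;
+    # online, a single executemany-style INSERT is used when ``multiinsert``
+    # is True.
+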
+    def _tokenize_column_type(self, column: Column) -> Params:
+        definition: str
+        definition = self.dialect.type_compiler.process(column.type).lower()
+
+        # tokenize the SQLAlchemy-generated version of a type, so that
+        # the two can be compared.
+        #
+        # examples:
+        # NUMERIC(10, 5)
+        # TIMESTAMP WITH TIMEZONE
+        # INTEGER UNSIGNED
+        # INTEGER (10) UNSIGNED
+        # INTEGER(10) UNSIGNED
+        # varchar character set utf8
+        #
+
+        tokens: List[str] = re.findall(r"[\w\-_]+|\(.+?\)", definition)
+
+        term_tokens: List[str] = []
+        paren_term = None
+
+        for token in tokens:
+            if re.match(r"^\(.*\)$", token):
+                paren_term = token
+            else:
+                term_tokens.append(token)
+
+        params = Params(term_tokens[0], term_tokens[1:], [], {})
+
+        if paren_term:
+            term: str
+            for term in re.findall("[^(),]+", paren_term):
+                if "=" in term:
+                    key, val = term.split("=")
+                    params.kwargs[key.strip()] = val.strip()
+                else:
+                    params.args.append(term.strip())
+
+        return params
+
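+    # Illustrative sketch of the tokenization above: a type compiling to
+    # "NUMERIC(10, 5)" yields
+    # Params(token0="numeric", tokens=[], args=["10", "5"], kwargs={}),
+    # while "INTEGER UNSIGNED" yields
+    # Params(token0="integer", tokens=["unsigned"], args=[], kwargs={}).
+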
+    def _column_types_match(
+        self, inspector_params: Params, metadata_params: Params
+    ) -> bool:
+        if inspector_params.token0 == metadata_params.token0:
+            return True
+
+        synonyms = [{t.lower() for t in batch} for batch in self.type_synonyms]
+        inspector_all_terms = " ".join(
+            [inspector_params.token0] + inspector_params.tokens
+        )
+        metadata_all_terms = " ".join(
+            [metadata_params.token0] + metadata_params.tokens
+        )
+
+        for batch in synonyms:
+            if {inspector_all_terms, metadata_all_terms}.issubset(batch) or {
+                inspector_params.token0,
+                metadata_params.token0,
+            }.issubset(batch):
+                return True
+        return False
+
+    def _column_args_match(
+        self, inspected_params: Params, meta_params: Params
+    ) -> bool:
+        """We want to compare column parameters. However, we only want
+        to compare parameters that are set. If they both have `collation`,
+        we want to make sure they are the same. However, if only one
+        specifies it, don't flag it for being less specific.
+        """
+
+        if (
+            len(meta_params.tokens) == len(inspected_params.tokens)
+            and meta_params.tokens != inspected_params.tokens
+        ):
+            return False
+
+        if (
+            len(meta_params.args) == len(inspected_params.args)
+            and meta_params.args != inspected_params.args
+        ):
+            return False
+
+        insp = " ".join(inspected_params.tokens).lower()
+        meta = " ".join(meta_params.tokens).lower()
+
+        for reg in self.type_arg_extract:
+            mi = re.search(reg, insp)
+            mm = re.search(reg, meta)
+
+            if mi and mm and mi.group(1) != mm.group(1):
+                return False
+
+        return True
+
+    def compare_type(
+        self, inspector_column: Column[Any], metadata_column: Column
+    ) -> bool:
+        """Returns True if there ARE differences between the types of the two
+        columns. Takes impl.type_synonyms into account between reflected
+        and metadata types.
+        """
+        inspector_params = self._tokenize_column_type(inspector_column)
+        metadata_params = self._tokenize_column_type(metadata_column)
+
+        if not self._column_types_match(inspector_params, metadata_params):
+            return True
+        if not self._column_args_match(inspector_params, metadata_params):
+            return True
+        return False
+
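+    # Illustrative sketch: with the default ``type_synonyms`` above, a
+    # reflected NUMERIC(10, 2) compared against a metadata DECIMAL(10, 2)
+    # returns False (no difference), since "numeric" and "decimal" share a
+    # synonym set and the precision/scale args match.
+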
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        return rendered_inspector_default != rendered_metadata_default
+
+    def correct_for_autogen_constraints(
+        self,
+        conn_uniques: Set[UniqueConstraint],
+        conn_indexes: Set[Index],
+        metadata_unique_constraints: Set[UniqueConstraint],
+        metadata_indexes: Set[Index],
+    ) -> None:
+        pass
+
+    def cast_for_batch_migrate(self, existing, existing_transfer, new_type):
+        if existing.type._type_affinity is not new_type._type_affinity:
+            existing_transfer["expr"] = cast(
+                existing_transfer["expr"], new_type
+            )
+
+    def render_ddl_sql_expr(
+        self, expr: ClauseElement, is_server_default: bool = False, **kw: Any
+    ) -> str:
+        """Render a SQL expression that is typically a server default,
+        index expression, etc.
+
+        """
+
+        compile_kw = {"literal_binds": True, "include_table": False}
+
+        return str(
+            expr.compile(dialect=self.dialect, compile_kwargs=compile_kw)
+        )
+
+    def _compat_autogen_column_reflect(self, inspector: Inspector) -> Callable:
+        return self.autogen_column_reflect
+
+    def correct_for_autogen_foreignkeys(
+        self,
+        conn_fks: Set[ForeignKeyConstraint],
+        metadata_fks: Set[ForeignKeyConstraint],
+    ) -> None:
+        pass
+
+    def autogen_column_reflect(self, inspector, table, column_info):
+        """A hook that is attached to the 'column_reflect' event for when
+        a Table is reflected from the database during the autogenerate
+        process.
+
+        Dialects can elect to modify the information gathered here.
+
+        """
+
+    def start_migrations(self) -> None:
+        """A hook called when :meth:`.EnvironmentContext.run_migrations`
+        is called.
+
+        Implementations can set up per-migration-run state here.
+
+        """
+
+    def emit_begin(self) -> None:
+        """Emit the string ``BEGIN``, or the backend-specific
+        equivalent, on the current connection context.
+
+        This is used in offline mode and typically
+        via :meth:`.EnvironmentContext.begin_transaction`.
+
+        """
+        self.static_output("BEGIN" + self.command_terminator)
+
+    def emit_commit(self) -> None:
+        """Emit the string ``COMMIT``, or the backend-specific
+        equivalent, on the current connection context.
+
+        This is used in offline mode and typically
+        via :meth:`.EnvironmentContext.begin_transaction`.
+
+        """
+        self.static_output("COMMIT" + self.command_terminator)
+
+    def render_type(
+        self, type_obj: TypeEngine, autogen_context: AutogenContext
+    ) -> Union[str, Literal[False]]:
+        return False
+
+    def _compare_identity_default(self, metadata_identity, inspector_identity):
+        # ignored contains the attributes that were not considered
+        # because they are assumed to be at their default values in the db.
+        diff, ignored = _compare_identity_options(
+            metadata_identity,
+            inspector_identity,
+            sqla_compat.Identity(),
+            skip={"always"},
+        )
+
+        meta_always = getattr(metadata_identity, "always", None)
+        inspector_always = getattr(inspector_identity, "always", None)
+        # None and False are the same in this comparison
+        if bool(meta_always) != bool(inspector_always):
+            diff.add("always")
+
+        diff.difference_update(self.identity_attrs_ignore)
+
+        # returns 3 values:
+        return (
+            # different identity attributes
+            diff,
+            # ignored identity attributes
+            ignored,
+            # if the two identity should be considered different
+            bool(diff) or bool(metadata_identity) != bool(inspector_identity),
+        )
+
+    def _compare_index_unique(
+        self, metadata_index: Index, reflected_index: Index
+    ) -> Optional[str]:
+        conn_unique = bool(reflected_index.unique)
+        meta_unique = bool(metadata_index.unique)
+        if conn_unique != meta_unique:
+            return f"unique={conn_unique} to unique={meta_unique}"
+        else:
+            return None
+
+    def _create_metadata_constraint_sig(
+        self, constraint: _autogen._C, **opts: Any
+    ) -> _constraint_sig[_autogen._C]:
+        return _constraint_sig.from_constraint(True, self, constraint, **opts)
+
+    def _create_reflected_constraint_sig(
+        self, constraint: _autogen._C, **opts: Any
+    ) -> _constraint_sig[_autogen._C]:
+        return _constraint_sig.from_constraint(False, self, constraint, **opts)
+
+    def compare_indexes(
+        self,
+        metadata_index: Index,
+        reflected_index: Index,
+    ) -> ComparisonResult:
+        """Compare two indexes by comparing the signature generated by
+        ``create_index_sig``.
+
+        This method returns a ``ComparisonResult``.
+        """
+        msg: List[str] = []
+        unique_msg = self._compare_index_unique(
+            metadata_index, reflected_index
+        )
+        if unique_msg:
+            msg.append(unique_msg)
+        m_sig = self._create_metadata_constraint_sig(metadata_index)
+        r_sig = self._create_reflected_constraint_sig(reflected_index)
+
+        assert _autogen.is_index_sig(m_sig)
+        assert _autogen.is_index_sig(r_sig)
+
+        # The assumption is that the indexes have no expressions
+        for sig in m_sig, r_sig:
+            if sig.has_expressions:
+                log.warning(
+                    "Generating approximate signature for index %s. "
+                    "The dialect "
+                    "implementation should either skip expression indexes "
+                    "or provide a custom implementation.",
+                    sig.const,
+                )
+
+        if m_sig.column_names != r_sig.column_names:
+            msg.append(
+                f"expression {r_sig.column_names} to {m_sig.column_names}"
+            )
+
+        if msg:
+            return ComparisonResult.Different(msg)
+        else:
+            return ComparisonResult.Equal()
+
+    def compare_unique_constraint(
+        self,
+        metadata_constraint: UniqueConstraint,
+        reflected_constraint: UniqueConstraint,
+    ) -> ComparisonResult:
+        """Compare two unique constraints by comparing the two signatures.
+
+        The signatures are generated internally from the two given
+        constraints via the ``_create_metadata_constraint_sig`` /
+        ``_create_reflected_constraint_sig`` helpers.
+
+        This method returns a ``ComparisonResult``.
+        """
+        metadata_tup = self._create_metadata_constraint_sig(
+            metadata_constraint
+        )
+        reflected_tup = self._create_reflected_constraint_sig(
+            reflected_constraint
+        )
+
+        meta_sig = metadata_tup.unnamed
+        conn_sig = reflected_tup.unnamed
+        if conn_sig != meta_sig:
+            return ComparisonResult.Different(
+                f"expression {conn_sig} to {meta_sig}"
+            )
+        else:
+            return ComparisonResult.Equal()
+
+    def _skip_functional_indexes(self, metadata_indexes, conn_indexes):
+        conn_indexes_by_name = {c.name: c for c in conn_indexes}
+
+        for idx in list(metadata_indexes):
+            if idx.name in conn_indexes_by_name:
+                continue
+            iex = sqla_compat.is_expression_index(idx)
+            if iex:
+                util.warn(
+                    "autogenerate skipping metadata-specified "
+                    "expression-based index "
+                    f"{idx.name!r}; dialect {self.__dialect__!r} under "
+                    f"SQLAlchemy {sqla_compat.sqlalchemy_version} can't "
+                    "reflect these indexes so they can't be compared"
+                )
+                metadata_indexes.discard(idx)
+
+    def adjust_reflected_dialect_options(
+        self, reflected_object: Dict[str, Any], kind: str
+    ) -> Dict[str, Any]:
+        return reflected_object.get("dialect_options", {})
+
+
+class Params(NamedTuple):
+    token0: str
+    tokens: List[str]
+    args: List[str]
+    kwargs: Dict[str, str]
+
+
+def _compare_identity_options(
+    metadata_io: Union[schema.Identity, schema.Sequence, None],
+    inspector_io: Union[schema.Identity, schema.Sequence, None],
+    default_io: Union[schema.Identity, schema.Sequence],
+    skip: Set[str],
+):
+    # this can be used for identity or sequence compare.
+    # default_io is an instance of IdentityOption with all attributes to the
+    # default value.
+    meta_d = sqla_compat._get_identity_options_dict(metadata_io)
+    insp_d = sqla_compat._get_identity_options_dict(inspector_io)
+
+    diff = set()
+    ignored_attr = set()
+
+    def check_dicts(
+        meta_dict: Mapping[str, Any],
+        insp_dict: Mapping[str, Any],
+        default_dict: Mapping[str, Any],
+        attrs: Iterable[str],
+    ):
+        for attr in set(attrs).difference(skip):
+            meta_value = meta_dict.get(attr)
+            insp_value = insp_dict.get(attr)
+            if insp_value != meta_value:
+                default_value = default_dict.get(attr)
+                if meta_value == default_value:
+                    ignored_attr.add(attr)
+                else:
+                    diff.add(attr)
+
+    check_dicts(
+        meta_d,
+        insp_d,
+        sqla_compat._get_identity_options_dict(default_io),
+        set(meta_d).union(insp_d),
+    )
+    if sqla_compat.identity_has_dialect_kwargs:
+        # use only the dialect kwargs in inspector_io since metadata_io
+        # can have options for many backends
+        check_dicts(
+            getattr(metadata_io, "dialect_kwargs", {}),
+            getattr(inspector_io, "dialect_kwargs", {}),
+            default_io.dialect_kwargs,  # type: ignore[union-attr]
+            getattr(inspector_io, "dialect_kwargs", {}),
+        )
+
+    return diff, ignored_attr
diff --git a/venv/Lib/site-packages/alembic/ddl/mssql.py b/venv/Lib/site-packages/alembic/ddl/mssql.py
new file mode 100644
index 0000000000000000000000000000000000000000..baa43d5e73abb3e40294c0000d0f2694182744eb
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/mssql.py
@@ -0,0 +1,419 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import types as sqltypes
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import CreateIndex
+from sqlalchemy.sql.base import Executable
+from sqlalchemy.sql.elements import ClauseElement
+
+from .base import AddColumn
+from .base import alter_column
+from .base import alter_table
+from .base import ColumnDefault
+from .base import ColumnName
+from .base import ColumnNullable
+from .base import ColumnType
+from .base import format_column_name
+from .base import format_server_default
+from .base import format_table_name
+from .base import format_type
+from .base import RenameTable
+from .impl import DefaultImpl
+from .. import util
+from ..util import sqla_compat
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.dialects.mssql.base import MSDDLCompiler
+    from sqlalchemy.dialects.mssql.base import MSSQLCompiler
+    from sqlalchemy.engine.cursor import CursorResult
+    from sqlalchemy.sql.schema import Index
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.selectable import TableClause
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .base import _ServerDefault
+
+
+class MSSQLImpl(DefaultImpl):
+    __dialect__ = "mssql"
+    transactional_ddl = True
+    batch_separator = "GO"
+
+    type_synonyms = DefaultImpl.type_synonyms + ({"VARCHAR", "NVARCHAR"},)
+    identity_attrs_ignore = DefaultImpl.identity_attrs_ignore + (
+        "minvalue",
+        "maxvalue",
+        "nominvalue",
+        "nomaxvalue",
+        "cycle",
+        "cache",
+    )
+
+    def __init__(self, *arg, **kw) -> None:
+        super().__init__(*arg, **kw)
+        self.batch_separator = self.context_opts.get(
+            "mssql_batch_separator", self.batch_separator
+        )
+
+    def _exec(self, construct: Any, *args, **kw) -> Optional[CursorResult]:
+        result = super()._exec(construct, *args, **kw)
+        if self.as_sql and self.batch_separator:
+            self.static_output(self.batch_separator)
+        return result
+
+    def emit_begin(self) -> None:
+        self.static_output("BEGIN TRANSACTION" + self.command_terminator)
+
+    def emit_commit(self) -> None:
+        super().emit_commit()
+        if self.as_sql and self.batch_separator:
+            self.static_output(self.batch_separator)
+
+    def alter_column(  # type:ignore[override]
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Optional[
+            Union[_ServerDefault, Literal[False]]
+        ] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        schema: Optional[str] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_nullable: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        if nullable is not None:
+            if type_ is not None:
+                # the NULL/NOT NULL alter will handle
+                # the type alteration
+                existing_type = type_
+                type_ = None
+            elif existing_type is None:
+                raise util.CommandError(
+                    "MS-SQL ALTER COLUMN operations "
+                    "with NULL or NOT NULL require the "
+                    "existing_type or a new type_ be passed."
+                )
+        elif existing_nullable is not None and type_ is not None:
+            nullable = existing_nullable
+
+            # the NULL/NOT NULL alter will handle
+            # the type alteration
+            existing_type = type_
+            type_ = None
+
+        elif type_ is not None:
+            util.warn(
+                "MS-SQL ALTER COLUMN operations that specify type_= "
+                "should also specify a nullable= or "
+                "existing_nullable= argument to avoid implicit conversion "
+                "of NOT NULL columns to NULL."
+            )
+
+        used_default = False
+        if sqla_compat._server_default_is_identity(
+            server_default, existing_server_default
+        ) or sqla_compat._server_default_is_computed(
+            server_default, existing_server_default
+        ):
+            used_default = True
+            kw["server_default"] = server_default
+            kw["existing_server_default"] = existing_server_default
+
+        super().alter_column(
+            table_name,
+            column_name,
+            nullable=nullable,
+            type_=type_,
+            schema=schema,
+            existing_type=existing_type,
+            existing_nullable=existing_nullable,
+            **kw,
+        )
+
+        if server_default is not False and used_default is False:
+            if existing_server_default is not False or server_default is None:
+                self._exec(
+                    _ExecDropConstraint(
+                        table_name,
+                        column_name,
+                        "sys.default_constraints",
+                        schema,
+                    )
+                )
+            if server_default is not None:
+                super().alter_column(
+                    table_name,
+                    column_name,
+                    schema=schema,
+                    server_default=server_default,
+                )
+
+        if name is not None:
+            super().alter_column(
+                table_name, column_name, schema=schema, name=name
+            )
+
+    def create_index(self, index: Index, **kw: Any) -> None:
+        # this likely defaults to None if not present, so get()
+        # should normally not return the default value; being
+        # defensive in any case.
+        mssql_include = index.kwargs.get("mssql_include", None) or ()
+        assert index.table is not None
+        for col in mssql_include:
+            if col not in index.table.c:
+                index.table.append_column(Column(col, sqltypes.NullType))
+        self._exec(CreateIndex(index, **kw))
+
+    def bulk_insert(  # type:ignore[override]
+        self, table: Union[TableClause, Table], rows: List[dict], **kw: Any
+    ) -> None:
+        if self.as_sql:
+            self._exec(
+                "SET IDENTITY_INSERT %s ON"
+                % self.dialect.identifier_preparer.format_table(table)
+            )
+            super().bulk_insert(table, rows, **kw)
+            self._exec(
+                "SET IDENTITY_INSERT %s OFF"
+                % self.dialect.identifier_preparer.format_table(table)
+            )
+        else:
+            super().bulk_insert(table, rows, **kw)
+
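+    # Illustrative sketch (hypothetical table "t"): in SQL-output mode the
+    # INSERTs above are bracketed by "SET IDENTITY_INSERT t ON" and
+    # "SET IDENTITY_INSERT t OFF", each statement followed by the "GO"
+    # batch separator appended in _exec().
+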
+    def drop_column(
+        self,
+        table_name: str,
+        column: Column[Any],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> None:
+        drop_default = kw.pop("mssql_drop_default", False)
+        if drop_default:
+            self._exec(
+                _ExecDropConstraint(
+                    table_name, column, "sys.default_constraints", schema
+                )
+            )
+        drop_check = kw.pop("mssql_drop_check", False)
+        if drop_check:
+            self._exec(
+                _ExecDropConstraint(
+                    table_name, column, "sys.check_constraints", schema
+                )
+            )
+        drop_fks = kw.pop("mssql_drop_foreign_key", False)
+        if drop_fks:
+            self._exec(_ExecDropFKConstraint(table_name, column, schema))
+        super().drop_column(table_name, column, schema=schema, **kw)
+
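+    # Illustrative sketch (hypothetical names): dropping a column that still
+    # carries a default constraint requires the extra flag, e.g.
+    #
+    #     op.drop_column("t", "c", mssql_drop_default=True)
+    #
+    # which first emits the dynamic constraint lookup compiled below for
+    # _ExecDropConstraint.
+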
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        if rendered_metadata_default is not None:
+            rendered_metadata_default = re.sub(
+                r"[\(\) \"\']", "", rendered_metadata_default
+            )
+
+        if rendered_inspector_default is not None:
+            # SQL Server collapses whitespace and adds arbitrary parentheses
+            # within expressions; our only option is to collapse all of it.
+
+            rendered_inspector_default = re.sub(
+                r"[\(\) \"\']", "", rendered_inspector_default
+            )
+
+        return rendered_inspector_default != rendered_metadata_default
+
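+    # Illustrative sketch: the normalization above treats a reflected
+    # "((0))" and a metadata "0" as equal, since parentheses, quotes and
+    # spaces are all stripped before comparison.
+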
+    def _compare_identity_default(self, metadata_identity, inspector_identity):
+        diff, ignored, is_alter = super()._compare_identity_default(
+            metadata_identity, inspector_identity
+        )
+
+        if (
+            metadata_identity is None
+            and inspector_identity is not None
+            and not diff
+            and inspector_identity.column is not None
+            and inspector_identity.column.primary_key
+        ):
+            # MSSQL reflects primary keys with autoincrement as identity
+            # columns; if no differing attributes are present, ignore them
+            is_alter = False
+
+        return diff, ignored, is_alter
+
+    def adjust_reflected_dialect_options(
+        self, reflected_object: Dict[str, Any], kind: str
+    ) -> Dict[str, Any]:
+        options: Dict[str, Any]
+        options = reflected_object.get("dialect_options", {}).copy()
+        if not options.get("mssql_include"):
+            options.pop("mssql_include", None)
+        if not options.get("mssql_clustered"):
+            options.pop("mssql_clustered", None)
+        return options
+
+
+class _ExecDropConstraint(Executable, ClauseElement):
+    inherit_cache = False
+
+    def __init__(
+        self,
+        tname: str,
+        colname: Union[Column[Any], str],
+        type_: str,
+        schema: Optional[str],
+    ) -> None:
+        self.tname = tname
+        self.colname = colname
+        self.type_ = type_
+        self.schema = schema
+
+
+class _ExecDropFKConstraint(Executable, ClauseElement):
+    inherit_cache = False
+
+    def __init__(
+        self, tname: str, colname: Column[Any], schema: Optional[str]
+    ) -> None:
+        self.tname = tname
+        self.colname = colname
+        self.schema = schema
+
+
+@compiles(_ExecDropConstraint, "mssql")
+def _exec_drop_col_constraint(
+    element: _ExecDropConstraint, compiler: MSSQLCompiler, **kw
+) -> str:
+    schema, tname, colname, type_ = (
+        element.schema,
+        element.tname,
+        element.colname,
+        element.type_,
+    )
+    # from http://www.mssqltips.com/sqlservertip/1425/\
+    # working-with-default-constraints-in-sql-server/
+    return """declare @const_name varchar(256)
+select @const_name = QUOTENAME([name]) from %(type)s
+where parent_object_id = object_id('%(schema_dot)s%(tname)s')
+and col_name(parent_object_id, parent_column_id) = '%(colname)s'
+exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % {
+        "type": type_,
+        "tname": tname,
+        "colname": colname,
+        "tname_quoted": format_table_name(compiler, tname, schema),
+        "schema_dot": schema + "." if schema else "",
+    }
+
+
+@compiles(_ExecDropFKConstraint, "mssql")
+def _exec_drop_col_fk_constraint(
+    element: _ExecDropFKConstraint, compiler: MSSQLCompiler, **kw
+) -> str:
+    schema, tname, colname = element.schema, element.tname, element.colname
+
+    return """declare @const_name varchar(256)
+select @const_name = QUOTENAME([name]) from
+sys.foreign_keys fk join sys.foreign_key_columns fkc
+on fk.object_id=fkc.constraint_object_id
+where fkc.parent_object_id = object_id('%(schema_dot)s%(tname)s')
+and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s'
+exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % {
+        "tname": tname,
+        "colname": colname,
+        "tname_quoted": format_table_name(compiler, tname, schema),
+        "schema_dot": schema + "." if schema else "",
+    }
+
+
+@compiles(AddColumn, "mssql")
+def visit_add_column(element: AddColumn, compiler: MSDDLCompiler, **kw) -> str:
+    return "%s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        mssql_add_column(compiler, element.column, **kw),
+    )
+
+
+def mssql_add_column(
+    compiler: MSDDLCompiler, column: Column[Any], **kw
+) -> str:
+    return "ADD %s" % compiler.get_column_specification(column, **kw)
+
+
+@compiles(ColumnNullable, "mssql")
+def visit_column_nullable(
+    element: ColumnNullable, compiler: MSDDLCompiler, **kw
+) -> str:
+    return "%s %s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        format_type(compiler, element.existing_type),  # type: ignore[arg-type]
+        "NULL" if element.nullable else "NOT NULL",
+    )
+
+
+@compiles(ColumnDefault, "mssql")
+def visit_column_default(
+    element: ColumnDefault, compiler: MSDDLCompiler, **kw
+) -> str:
+    # TODO: there can also be a named constraint
+    # with ADD CONSTRAINT here
+    return "%s ADD DEFAULT %s FOR %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_server_default(compiler, element.default),
+        format_column_name(compiler, element.column_name),
+    )
+
+
+@compiles(ColumnName, "mssql")
+def visit_rename_column(
+    element: ColumnName, compiler: MSDDLCompiler, **kw
+) -> str:
+    return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % (
+        format_table_name(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+    )
+
+
+@compiles(ColumnType, "mssql")
+def visit_column_type(
+    element: ColumnType, compiler: MSDDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        format_type(compiler, element.type_),
+    )
+
+
+@compiles(RenameTable, "mssql")
+def visit_rename_table(
+    element: RenameTable, compiler: MSDDLCompiler, **kw
+) -> str:
+    return "EXEC sp_rename '%s', %s" % (
+        format_table_name(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, None),
+    )
diff --git a/venv/Lib/site-packages/alembic/ddl/mysql.py b/venv/Lib/site-packages/alembic/ddl/mysql.py
new file mode 100644
index 0000000000000000000000000000000000000000..f312173e946d117b276e06ed5aa290f18f7db61b
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/mysql.py
@@ -0,0 +1,474 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import schema
+from sqlalchemy import types as sqltypes
+
+from .base import alter_table
+from .base import AlterColumn
+from .base import ColumnDefault
+from .base import ColumnName
+from .base import ColumnNullable
+from .base import ColumnType
+from .base import format_column_name
+from .base import format_server_default
+from .impl import DefaultImpl
+from .. import util
+from ..util import sqla_compat
+from ..util.sqla_compat import _is_mariadb
+from ..util.sqla_compat import _is_type_bound
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.dialects.mysql.base import MySQLDDLCompiler
+    from sqlalchemy.sql.ddl import DropConstraint
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .base import _ServerDefault
+
+
+class MySQLImpl(DefaultImpl):
+    __dialect__ = "mysql"
+
+    transactional_ddl = False
+    type_synonyms = DefaultImpl.type_synonyms + (
+        {"BOOL", "TINYINT"},
+        {"JSON", "LONGTEXT"},
+    )
+    type_arg_extract = [r"character set ([\w\-_]+)", r"collate ([\w\-_]+)"]
+
+    def alter_column(  # type:ignore[override]
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Union[_ServerDefault, Literal[False]] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        schema: Optional[str] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_nullable: Optional[bool] = None,
+        autoincrement: Optional[bool] = None,
+        existing_autoincrement: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+        existing_comment: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        if sqla_compat._server_default_is_identity(
+            server_default, existing_server_default
+        ) or sqla_compat._server_default_is_computed(
+            server_default, existing_server_default
+        ):
+            # modifying computed or identity columns is not supported
+            # the default implementation will raise
+            super().alter_column(
+                table_name,
+                column_name,
+                nullable=nullable,
+                type_=type_,
+                schema=schema,
+                existing_type=existing_type,
+                existing_nullable=existing_nullable,
+                server_default=server_default,
+                existing_server_default=existing_server_default,
+                **kw,
+            )
+        if name is not None or self._is_mysql_allowed_functional_default(
+            type_ if type_ is not None else existing_type, server_default
+        ):
+            self._exec(
+                MySQLChangeColumn(
+                    table_name,
+                    column_name,
+                    schema=schema,
+                    newname=name if name is not None else column_name,
+                    nullable=nullable
+                    if nullable is not None
+                    else existing_nullable
+                    if existing_nullable is not None
+                    else True,
+                    type_=type_ if type_ is not None else existing_type,
+                    default=server_default
+                    if server_default is not False
+                    else existing_server_default,
+                    autoincrement=autoincrement
+                    if autoincrement is not None
+                    else existing_autoincrement,
+                    comment=comment
+                    if comment is not False
+                    else existing_comment,
+                )
+            )
+        elif (
+            nullable is not None
+            or type_ is not None
+            or autoincrement is not None
+            or comment is not False
+        ):
+            self._exec(
+                MySQLModifyColumn(
+                    table_name,
+                    column_name,
+                    schema=schema,
+                    newname=name if name is not None else column_name,
+                    nullable=nullable
+                    if nullable is not None
+                    else existing_nullable
+                    if existing_nullable is not None
+                    else True,
+                    type_=type_ if type_ is not None else existing_type,
+                    default=server_default
+                    if server_default is not False
+                    else existing_server_default,
+                    autoincrement=autoincrement
+                    if autoincrement is not None
+                    else existing_autoincrement,
+                    comment=comment
+                    if comment is not False
+                    else existing_comment,
+                )
+            )
+        elif server_default is not False:
+            self._exec(
+                MySQLAlterDefault(
+                    table_name, column_name, server_default, schema=schema
+                )
+            )
+
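+    # Illustrative sketch (hypothetical names): since MySQL lacks
+    # fine-grained ALTER COLUMN forms, a rename such as
+    #
+    #     impl.alter_column("t", "c", name="c2", existing_type=Integer())
+    #
+    # is emitted as one full-column statement, roughly:
+    #
+    #     ALTER TABLE t CHANGE c c2 INTEGER NULL
+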
+    def drop_constraint(
+        self,
+        const: Constraint,
+    ) -> None:
+        if isinstance(const, schema.CheckConstraint) and _is_type_bound(const):
+            return
+
+        super().drop_constraint(const)
+
+    def _is_mysql_allowed_functional_default(
+        self,
+        type_: Optional[TypeEngine],
+        server_default: Union[_ServerDefault, Literal[False]],
+    ) -> bool:
+        return (
+            type_ is not None
+            and type_._type_affinity is sqltypes.DateTime
+            and server_default is not None
+        )
+
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        # partially a workaround for SQLAlchemy issue #3023; if the
+        # column were created without "NOT NULL", MySQL may have added
+        # an implicit default of '0' which we need to skip
+        # TODO: this is not really covered anymore?
+        if (
+            metadata_column.type._type_affinity is sqltypes.Integer
+            and inspector_column.primary_key
+            and not inspector_column.autoincrement
+            and not rendered_metadata_default
+            and rendered_inspector_default == "'0'"
+        ):
+            return False
+        elif (
+            rendered_inspector_default
+            and inspector_column.type._type_affinity is sqltypes.Integer
+        ):
+            rendered_inspector_default = (
+                re.sub(r"^'|'$", "", rendered_inspector_default)
+                if rendered_inspector_default is not None
+                else None
+            )
+            return rendered_inspector_default != rendered_metadata_default
+        elif (
+            rendered_metadata_default
+            and metadata_column.type._type_affinity is sqltypes.String
+        ):
+            metadata_default = re.sub(r"^'|'$", "", rendered_metadata_default)
+            return rendered_inspector_default != f"'{metadata_default}'"
+        elif rendered_inspector_default and rendered_metadata_default:
+            # adjust for "function()" vs. "FUNCTION" as can occur particularly
+            # for the CURRENT_TIMESTAMP function on newer MariaDB versions
+
+            # SQLAlchemy MySQL dialect bundles ON UPDATE into the server
+            # default; adjust for this possibly being present.
+            onupdate_ins = re.match(
+                r"(.*) (on update.*?)(?:\(\))?$",
+                rendered_inspector_default.lower(),
+            )
+            onupdate_met = re.match(
+                r"(.*) (on update.*?)(?:\(\))?$",
+                rendered_metadata_default.lower(),
+            )
+
+            if onupdate_ins:
+                if not onupdate_met:
+                    return True
+                elif onupdate_ins.group(2) != onupdate_met.group(2):
+                    return True
+
+                rendered_inspector_default = onupdate_ins.group(1)
+                rendered_metadata_default = onupdate_met.group(1)
+
+            return re.sub(
+                r"(.*?)(?:\(\))?$", r"\1", rendered_inspector_default.lower()
+            ) != re.sub(
+                r"(.*?)(?:\(\))?$", r"\1", rendered_metadata_default.lower()
+            )
+        else:
+            return rendered_inspector_default != rendered_metadata_default
+
+    def correct_for_autogen_constraints(
+        self,
+        conn_unique_constraints,
+        conn_indexes,
+        metadata_unique_constraints,
+        metadata_indexes,
+    ):
+        # TODO: if SQLA 1.0, make use of "duplicates_index"
+        # metadata
+        removed = set()
+        for idx in list(conn_indexes):
+            if idx.unique:
+                continue
+            # MySQL puts implicit indexes on FK columns, even if
+            # composite and even if MyISAM, so we can't check this too easily.
+            # The name of the index may be the column name or it may
+            # be the name of the FK constraint.
+            for col in idx.columns:
+                if idx.name == col.name:
+                    conn_indexes.remove(idx)
+                    removed.add(idx.name)
+                    break
+                for fk in col.foreign_keys:
+                    if fk.name == idx.name:
+                        conn_indexes.remove(idx)
+                        removed.add(idx.name)
+                        break
+                if idx.name in removed:
+                    break
+
+        # then remove indexes from the "metadata_indexes"
+        # that we've removed from reflected, otherwise they come out
+        # as adds (see #202)
+        for idx in list(metadata_indexes):
+            if idx.name in removed:
+                metadata_indexes.remove(idx)
+
+    def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks):
+        conn_fk_by_sig = {
+            self._create_reflected_constraint_sig(fk).unnamed_no_options: fk
+            for fk in conn_fks
+        }
+        metadata_fk_by_sig = {
+            self._create_metadata_constraint_sig(fk).unnamed_no_options: fk
+            for fk in metadata_fks
+        }
+
+        for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig):
+            mdfk = metadata_fk_by_sig[sig]
+            cnfk = conn_fk_by_sig[sig]
+            # MySQL considers RESTRICT to be the default and doesn't
+            # report on it. If the model has explicit RESTRICT and
+            # the conn FK has None, set it to RESTRICT
+            if (
+                mdfk.ondelete is not None
+                and mdfk.ondelete.lower() == "restrict"
+                and cnfk.ondelete is None
+            ):
+                cnfk.ondelete = "RESTRICT"
+            if (
+                mdfk.onupdate is not None
+                and mdfk.onupdate.lower() == "restrict"
+                and cnfk.onupdate is None
+            ):
+                cnfk.onupdate = "RESTRICT"
+
+
+class MariaDBImpl(MySQLImpl):
+    __dialect__ = "mariadb"
+
+
+class MySQLAlterDefault(AlterColumn):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        default: _ServerDefault,
+        schema: Optional[str] = None,
+    ) -> None:
+        super(AlterColumn, self).__init__(name, schema=schema)
+        self.column_name = column_name
+        self.default = default
+
+
+class MySQLChangeColumn(AlterColumn):
+    def __init__(
+        self,
+        name: str,
+        column_name: str,
+        schema: Optional[str] = None,
+        newname: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        nullable: Optional[bool] = None,
+        default: Optional[Union[_ServerDefault, Literal[False]]] = False,
+        autoincrement: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+    ) -> None:
+        super(AlterColumn, self).__init__(name, schema=schema)
+        self.column_name = column_name
+        self.nullable = nullable
+        self.newname = newname
+        self.default = default
+        self.autoincrement = autoincrement
+        self.comment = comment
+        if type_ is None:
+            raise util.CommandError(
+                "All MySQL CHANGE/MODIFY COLUMN operations "
+                "require the existing type."
+            )
+
+        self.type_ = sqltypes.to_instance(type_)
+
+
+class MySQLModifyColumn(MySQLChangeColumn):
+    pass
+
+
+@compiles(ColumnNullable, "mysql", "mariadb")
+@compiles(ColumnName, "mysql", "mariadb")
+@compiles(ColumnDefault, "mysql", "mariadb")
+@compiles(ColumnType, "mysql", "mariadb")
+def _mysql_doesnt_support_individual(element, compiler, **kw):
+    raise NotImplementedError(
+        "Individual alter column constructs not supported by MySQL"
+    )
+
+
+@compiles(MySQLAlterDefault, "mysql", "mariadb")
+def _mysql_alter_default(
+    element: MySQLAlterDefault, compiler: MySQLDDLCompiler, **kw
+) -> str:
+    return "%s ALTER COLUMN %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        "SET DEFAULT %s" % format_server_default(compiler, element.default)
+        if element.default is not None
+        else "DROP DEFAULT",
+    )
+
+
+@compiles(MySQLModifyColumn, "mysql", "mariadb")
+def _mysql_modify_column(
+    element: MySQLModifyColumn, compiler: MySQLDDLCompiler, **kw
+) -> str:
+    return "%s MODIFY %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        _mysql_colspec(
+            compiler,
+            nullable=element.nullable,
+            server_default=element.default,
+            type_=element.type_,
+            autoincrement=element.autoincrement,
+            comment=element.comment,
+        ),
+    )
+
+
+@compiles(MySQLChangeColumn, "mysql", "mariadb")
+def _mysql_change_column(
+    element: MySQLChangeColumn, compiler: MySQLDDLCompiler, **kw
+) -> str:
+    return "%s CHANGE %s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+        _mysql_colspec(
+            compiler,
+            nullable=element.nullable,
+            server_default=element.default,
+            type_=element.type_,
+            autoincrement=element.autoincrement,
+            comment=element.comment,
+        ),
+    )
+
+
+def _mysql_colspec(
+    compiler: MySQLDDLCompiler,
+    nullable: Optional[bool],
+    server_default: Optional[Union[_ServerDefault, Literal[False]]],
+    type_: TypeEngine,
+    autoincrement: Optional[bool],
+    comment: Optional[Union[str, Literal[False]]],
+) -> str:
+    spec = "%s %s" % (
+        compiler.dialect.type_compiler.process(type_),
+        "NULL" if nullable else "NOT NULL",
+    )
+    if autoincrement:
+        spec += " AUTO_INCREMENT"
+    if server_default is not False and server_default is not None:
+        spec += " DEFAULT %s" % format_server_default(compiler, server_default)
+    if comment:
+        spec += " COMMENT %s" % compiler.sql_compiler.render_literal_value(
+            comment, sqltypes.String()
+        )
+
+    return spec
+
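+# Illustrative sketch: for nullable=False, server_default="'x'",
+# type_=VARCHAR(10), and no autoincrement/comment, the colspec above renders
+# roughly as: VARCHAR(10) NOT NULL DEFAULT 'x'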
+
+@compiles(schema.DropConstraint, "mysql", "mariadb")
+def _mysql_drop_constraint(
+    element: DropConstraint, compiler: MySQLDDLCompiler, **kw
+) -> str:
+    """Redefine SQLAlchemy's drop constraint to
+    raise errors for invalid constraint types."""
+
+    constraint = element.element
+    if isinstance(
+        constraint,
+        (
+            schema.ForeignKeyConstraint,
+            schema.PrimaryKeyConstraint,
+            schema.UniqueConstraint,
+        ),
+    ):
+        assert not kw
+        return compiler.visit_drop_constraint(element)
+    elif isinstance(constraint, schema.CheckConstraint):
+        # note that SQLAlchemy as of 1.2 does not yet support
+        # DROP CONSTRAINT for MySQL/MariaDB, so we implement it fully
+        # here.
+        if _is_mariadb(compiler.dialect):
+            return "ALTER TABLE %s DROP CONSTRAINT %s" % (
+                compiler.preparer.format_table(constraint.table),
+                compiler.preparer.format_constraint(constraint),
+            )
+        else:
+            return "ALTER TABLE %s DROP CHECK %s" % (
+                compiler.preparer.format_table(constraint.table),
+                compiler.preparer.format_constraint(constraint),
+            )
+    else:
+        raise NotImplementedError(
+            "No generic 'DROP CONSTRAINT' in MySQL - "
+            "please specify constraint type"
+        )
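+
+
+# Illustrative sketch (hypothetical names): dropping a CHECK constraint
+# "ck_positive" on table "t" renders per the branch above as
+# "ALTER TABLE t DROP CONSTRAINT ck_positive" on MariaDB and
+# "ALTER TABLE t DROP CHECK ck_positive" on MySQL.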
diff --git a/venv/Lib/site-packages/alembic/ddl/oracle.py b/venv/Lib/site-packages/alembic/ddl/oracle.py
new file mode 100644
index 0000000000000000000000000000000000000000..54011740723749b50f53beaac6c75ca020e365a3
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/oracle.py
@@ -0,0 +1,200 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Optional
+from typing import TYPE_CHECKING
+
+from sqlalchemy.sql import sqltypes
+
+from .base import AddColumn
+from .base import alter_table
+from .base import ColumnComment
+from .base import ColumnDefault
+from .base import ColumnName
+from .base import ColumnNullable
+from .base import ColumnType
+from .base import format_column_name
+from .base import format_server_default
+from .base import format_table_name
+from .base import format_type
+from .base import IdentityColumnDefault
+from .base import RenameTable
+from .impl import DefaultImpl
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from sqlalchemy.dialects.oracle.base import OracleDDLCompiler
+    from sqlalchemy.engine.cursor import CursorResult
+    from sqlalchemy.sql.schema import Column
+
+
+class OracleImpl(DefaultImpl):
+    __dialect__ = "oracle"
+    transactional_ddl = False
+    batch_separator = "/"
+    command_terminator = ""
+    type_synonyms = DefaultImpl.type_synonyms + (
+        {"VARCHAR", "VARCHAR2"},
+        {"BIGINT", "INTEGER", "SMALLINT", "DECIMAL", "NUMERIC", "NUMBER"},
+        {"DOUBLE", "FLOAT", "DOUBLE_PRECISION"},
+    )
+    identity_attrs_ignore = ()
+
+    def __init__(self, *arg, **kw) -> None:
+        super().__init__(*arg, **kw)
+        self.batch_separator = self.context_opts.get(
+            "oracle_batch_separator", self.batch_separator
+        )
+
+    def _exec(self, construct: Any, *args, **kw) -> Optional[CursorResult]:
+        result = super()._exec(construct, *args, **kw)
+        if self.as_sql and self.batch_separator:
+            self.static_output(self.batch_separator)
+        return result
+
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        if rendered_metadata_default is not None:
+            rendered_metadata_default = re.sub(
+                r"^\((.+)\)$", r"\1", rendered_metadata_default
+            )
+
+            rendered_metadata_default = re.sub(
+                r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default
+            )
+
+        if rendered_inspector_default is not None:
+            rendered_inspector_default = re.sub(
+                r"^\((.+)\)$", r"\1", rendered_inspector_default
+            )
+
+            rendered_inspector_default = re.sub(
+                r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default
+            )
+
+            rendered_inspector_default = rendered_inspector_default.strip()
+        return rendered_inspector_default != rendered_metadata_default
+
+    def emit_begin(self) -> None:
+        self._exec("SET TRANSACTION READ WRITE")
+
+    def emit_commit(self) -> None:
+        self._exec("COMMIT")
+
+
+@compiles(AddColumn, "oracle")
+def visit_add_column(
+    element: AddColumn, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        add_column(compiler, element.column, **kw),
+    )
+
+
+@compiles(ColumnNullable, "oracle")
+def visit_column_nullable(
+    element: ColumnNullable, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "NULL" if element.nullable else "NOT NULL",
+    )
+
+
+@compiles(ColumnType, "oracle")
+def visit_column_type(
+    element: ColumnType, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "%s" % format_type(compiler, element.type_),
+    )
+
+
+@compiles(ColumnName, "oracle")
+def visit_column_name(
+    element: ColumnName, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s RENAME COLUMN %s TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_column_name(compiler, element.column_name),
+        format_column_name(compiler, element.newname),
+    )
+
+
+@compiles(ColumnDefault, "oracle")
+def visit_column_default(
+    element: ColumnDefault, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "DEFAULT %s" % format_server_default(compiler, element.default)
+        if element.default is not None
+        else "DEFAULT NULL",
+    )
+
+
+@compiles(ColumnComment, "oracle")
+def visit_column_comment(
+    element: ColumnComment, compiler: OracleDDLCompiler, **kw
+) -> str:
+    ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}"
+
+    comment = compiler.sql_compiler.render_literal_value(
+        (element.comment if element.comment is not None else ""),
+        sqltypes.String(),
+    )
+
+    return ddl.format(
+        table_name=element.table_name,
+        column_name=element.column_name,
+        comment=comment,
+    )
+
+
+@compiles(RenameTable, "oracle")
+def visit_rename_table(
+    element: RenameTable, compiler: OracleDDLCompiler, **kw
+) -> str:
+    return "%s RENAME TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, None),
+    )
+
+
+def alter_column(compiler: OracleDDLCompiler, name: str) -> str:
+    return "MODIFY %s" % format_column_name(compiler, name)
+
+
+def add_column(compiler: OracleDDLCompiler, column: Column[Any], **kw) -> str:
+    return "ADD %s" % compiler.get_column_specification(column, **kw)
+
+
+@compiles(IdentityColumnDefault, "oracle")
+def visit_identity_column(
+    element: IdentityColumnDefault, compiler: OracleDDLCompiler, **kw
+):
+    text = "%s %s " % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+    )
+    if element.default is None:
+        # drop identity
+        text += "DROP IDENTITY"
+        return text
+    else:
+        text += compiler.visit_identity_column(element.default)
+        return text
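Under the same assumptions, a minimal sketch of the Oracle-specific syntax produced by the alter_column() helper above, which renders MODIFY rather than ALTER COLUMN ("accounts" and "email" are hypothetical names):

    from sqlalchemy.dialects import oracle
    from alembic.ddl.base import ColumnNullable

    import alembic.ddl.oracle  # noqa: F401  -- registers the @compiles handlers

    stmt = ColumnNullable("accounts", "email", nullable=False)
    print(stmt.compile(dialect=oracle.dialect()))
    # ALTER TABLE accounts MODIFY email NOT NULL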
diff --git a/venv/Lib/site-packages/alembic/ddl/postgresql.py b/venv/Lib/site-packages/alembic/ddl/postgresql.py
new file mode 100644
index 0000000000000000000000000000000000000000..6507fcbdd75c82873e3fc4ff5e3030d8638c0474
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/postgresql.py
@@ -0,0 +1,848 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import logging
+import re
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import Column
+from sqlalchemy import literal_column
+from sqlalchemy import Numeric
+from sqlalchemy import text
+from sqlalchemy import types as sqltypes
+from sqlalchemy.dialects.postgresql import BIGINT
+from sqlalchemy.dialects.postgresql import ExcludeConstraint
+from sqlalchemy.dialects.postgresql import INTEGER
+from sqlalchemy.schema import CreateIndex
+from sqlalchemy.sql.elements import ColumnClause
+from sqlalchemy.sql.elements import TextClause
+from sqlalchemy.sql.functions import FunctionElement
+from sqlalchemy.types import NULLTYPE
+
+from .base import alter_column
+from .base import alter_table
+from .base import AlterColumn
+from .base import ColumnComment
+from .base import format_column_name
+from .base import format_table_name
+from .base import format_type
+from .base import IdentityColumnDefault
+from .base import RenameTable
+from .impl import ComparisonResult
+from .impl import DefaultImpl
+from .. import util
+from ..autogenerate import render
+from ..operations import ops
+from ..operations import schemaobj
+from ..operations.base import BatchOperations
+from ..operations.base import Operations
+from ..util import sqla_compat
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy import Index
+    from sqlalchemy import UniqueConstraint
+    from sqlalchemy.dialects.postgresql.array import ARRAY
+    from sqlalchemy.dialects.postgresql.base import PGDDLCompiler
+    from sqlalchemy.dialects.postgresql.hstore import HSTORE
+    from sqlalchemy.dialects.postgresql.json import JSON
+    from sqlalchemy.dialects.postgresql.json import JSONB
+    from sqlalchemy.sql.elements import ClauseElement
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .base import _ServerDefault
+    from ..autogenerate.api import AutogenContext
+    from ..autogenerate.render import _f_name
+    from ..runtime.migration import MigrationContext
+
+
+log = logging.getLogger(__name__)
+
+
+class PostgresqlImpl(DefaultImpl):
+    __dialect__ = "postgresql"
+    transactional_ddl = True
+    type_synonyms = DefaultImpl.type_synonyms + (
+        {"FLOAT", "DOUBLE PRECISION"},
+    )
+
+    def create_index(self, index: Index, **kw: Any) -> None:
+        # "postgresql_include" likely defaults to None when not present, so
+        # get() should normally not need to return its fallback; being
+        # defensive in any case
+        postgresql_include = index.kwargs.get("postgresql_include", None) or ()
+        for col in postgresql_include:
+            if col not in index.table.c:  # type: ignore[union-attr]
+                index.table.append_column(  # type: ignore[union-attr]
+                    Column(col, sqltypes.NullType)
+                )
+        self._exec(CreateIndex(index, **kw))
+
+    def prep_table_for_batch(self, batch_impl, table):
+        for constraint in table.constraints:
+            if (
+                constraint.name is not None
+                and constraint.name in batch_impl.named_constraints
+            ):
+                self.drop_constraint(constraint)
+
+    def compare_server_default(
+        self,
+        inspector_column,
+        metadata_column,
+        rendered_metadata_default,
+        rendered_inspector_default,
+    ):
+        # don't do defaults for SERIAL columns
+        if (
+            metadata_column.primary_key
+            and metadata_column is metadata_column.table._autoincrement_column
+        ):
+            return False
+
+        conn_col_default = rendered_inspector_default
+
+        defaults_equal = conn_col_default == rendered_metadata_default
+        if defaults_equal:
+            return False
+
+        if None in (
+            conn_col_default,
+            rendered_metadata_default,
+            metadata_column.server_default,
+        ):
+            return not defaults_equal
+
+        metadata_default = metadata_column.server_default.arg
+
+        if isinstance(metadata_default, str):
+            if not isinstance(inspector_column.type, Numeric):
+                metadata_default = re.sub(r"^'|'$", "", metadata_default)
+                metadata_default = f"'{metadata_default}'"
+
+            metadata_default = literal_column(metadata_default)
+
+        # run a real compare against the server
+        conn = self.connection
+        assert conn is not None
+        return not conn.scalar(
+            sqla_compat._select(
+                literal_column(conn_col_default) == metadata_default
+            )
+        )
+
+    def alter_column(  # type:ignore[override]
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Union[_ServerDefault, Literal[False]] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        schema: Optional[str] = None,
+        autoincrement: Optional[bool] = None,
+        existing_type: Optional[TypeEngine] = None,
+        existing_server_default: Optional[_ServerDefault] = None,
+        existing_nullable: Optional[bool] = None,
+        existing_autoincrement: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        using = kw.pop("postgresql_using", None)
+
+        if using is not None and type_ is None:
+            raise util.CommandError(
+                "postgresql_using must be used with the type_ parameter"
+            )
+
+        if type_ is not None:
+            self._exec(
+                PostgresqlColumnType(
+                    table_name,
+                    column_name,
+                    type_,
+                    schema=schema,
+                    using=using,
+                    existing_type=existing_type,
+                    existing_server_default=existing_server_default,
+                    existing_nullable=existing_nullable,
+                )
+            )
+
+        super().alter_column(
+            table_name,
+            column_name,
+            nullable=nullable,
+            server_default=server_default,
+            name=name,
+            schema=schema,
+            autoincrement=autoincrement,
+            existing_type=existing_type,
+            existing_server_default=existing_server_default,
+            existing_nullable=existing_nullable,
+            existing_autoincrement=existing_autoincrement,
+            **kw,
+        )
+
+    def autogen_column_reflect(self, inspector, table, column_info):
+        if column_info.get("default") and isinstance(
+            column_info["type"], (INTEGER, BIGINT)
+        ):
+            seq_match = re.match(
+                r"nextval\('(.+?)'::regclass\)", column_info["default"]
+            )
+            if seq_match:
+                info = sqla_compat._exec_on_inspector(
+                    inspector,
+                    text(
+                        "select c.relname, a.attname "
+                        "from pg_class as c join "
+                        "pg_depend d on d.objid=c.oid and "
+                        "d.classid='pg_class'::regclass and "
+                        "d.refclassid='pg_class'::regclass "
+                        "join pg_class t on t.oid=d.refobjid "
+                        "join pg_attribute a on a.attrelid=t.oid and "
+                        "a.attnum=d.refobjsubid "
+                        "where c.relkind='S' and c.relname=:seqname"
+                    ),
+                    seqname=seq_match.group(1),
+                ).first()
+                if info:
+                    seqname, colname = info
+                    if colname == column_info["name"]:
+                        log.info(
+                            "Detected sequence named '%s' as "
+                            "owned by integer column '%s(%s)', "
+                            "assuming SERIAL and omitting",
+                            seqname,
+                            table.name,
+                            colname,
+                        )
+                        # sequence, and the owner is this column,
+                        # it's a SERIAL - whack it!
+                        del column_info["default"]
+
+    def correct_for_autogen_constraints(
+        self,
+        conn_unique_constraints,
+        conn_indexes,
+        metadata_unique_constraints,
+        metadata_indexes,
+    ):
+        doubled_constraints = {
+            index
+            for index in conn_indexes
+            if index.info.get("duplicates_constraint")
+        }
+
+        for ix in doubled_constraints:
+            conn_indexes.remove(ix)
+
+        if not sqla_compat.sqla_2:
+            self._skip_functional_indexes(metadata_indexes, conn_indexes)
+
+    # pg behavior regarding modifiers
+    # | # | compiled sql     | returned sql     | regexp. group is removed |
+    # | - | ---------------- | -----------------| ------------------------ |
+    # | 1 | nulls first      | nulls first      | -                        |
+    # | 2 | nulls last       |                  | (?<! desc)( nulls last)$ |
+    # | 3 | asc              |                  | ( asc)$                  |
+    # | 4 | asc nulls first  | nulls first      | ( asc) nulls first$      |
+    # | 5 | asc nulls last   |                  | ( asc nulls last)$       |
+    # | 6 | desc             | desc             | -                        |
+    # | 7 | desc nulls first | desc             | desc( nulls first)$      |
+    # | 8 | desc nulls last  | desc nulls last  | -                        |
+    _default_modifiers_re = (  # order of case 2 and 5 matters
+        re.compile("( asc nulls last)$"),  # case 5
+        re.compile("(?<! desc)( nulls last)$"),  # case 2
+        re.compile("( asc)$"),  # case 3
+        re.compile("( asc) nulls first$"),  # case 4
+        re.compile(" desc( nulls first)$"),  # case 7
+    )
+
+    def _cleanup_index_expr(self, index: Index, expr: str) -> str:
+        expr = expr.lower().replace('"', "").replace("'", "")
+        if index.table is not None:
+            # should not be needed, since include_table=False is in compile
+            expr = expr.replace(f"{index.table.name.lower()}.", "")
+
+        if "::" in expr:
+            # strip :: cast. types can have spaces in them
+            expr = re.sub(r"(::[\w ]+\w)", "", expr)
+
+        while expr and expr[0] == "(" and expr[-1] == ")":
+            expr = expr[1:-1]
+
+        # NOTE: when parsing the connection expression this cleanup could
+        # be skipped
+        for rs in self._default_modifiers_re:
+            if match := rs.search(expr):
+                start, end = match.span(1)
+                expr = expr[:start] + expr[end:]
+                break
+
+        while expr and expr[0] == "(" and expr[-1] == ")":
+            expr = expr[1:-1]
+
+        # strip casts
+        cast_re = re.compile(r"cast\s*\(")
+        if cast_re.match(expr):
+            expr = cast_re.sub("", expr)
+            # remove the as type
+            expr = re.sub(r"as\s+[^)]+\)", "", expr)
+        # remove spaces
+        expr = expr.replace(" ", "")
+        return expr
+
+    def _dialect_options(
+        self, item: Union[Index, UniqueConstraint]
+    ) -> Tuple[Any, ...]:
+        # only the positive case is returned by SQLAlchemy reflection, so
+        # None and False are treated the same
+        if item.dialect_kwargs.get("postgresql_nulls_not_distinct"):
+            return ("nulls_not_distinct",)
+        return ()
+
+    def compare_indexes(
+        self,
+        metadata_index: Index,
+        reflected_index: Index,
+    ) -> ComparisonResult:
+        msg = []
+        unique_msg = self._compare_index_unique(
+            metadata_index, reflected_index
+        )
+        if unique_msg:
+            msg.append(unique_msg)
+        m_exprs = metadata_index.expressions
+        r_exprs = reflected_index.expressions
+        if len(m_exprs) != len(r_exprs):
+            msg.append(f"expression number {len(r_exprs)} to {len(m_exprs)}")
+        if msg:
+            # no point going further, return early
+            return ComparisonResult.Different(msg)
+        skip = []
+        for pos, (m_e, r_e) in enumerate(zip(m_exprs, r_exprs), 1):
+            m_compile = self._compile_element(m_e)
+            m_text = self._cleanup_index_expr(metadata_index, m_compile)
+            # print(f"META ORIG: {m_compile!r} CLEANUP: {m_text!r}")
+            r_compile = self._compile_element(r_e)
+            r_text = self._cleanup_index_expr(metadata_index, r_compile)
+            # print(f"CONN ORIG: {r_compile!r} CLEANUP: {r_text!r}")
+            if m_text == r_text:
+                continue  # these expressions are equal
+            elif m_compile.strip().endswith("_ops") and (
+                " " in m_compile or ")" in m_compile  # is an expression
+            ):
+                skip.append(
+                    f"expression #{pos} {m_compile!r} detected "
+                    "as including operator clause."
+                )
+                util.warn(
+                    f"Expression #{pos} {m_compile!r} in index "
+                    f"{reflected_index.name!r} detected to include "
+                    "an operator clause. Expression compare cannot proceed. "
+                    "Please move the operator clause to the "
+                    "``postgresql_ops`` dict to enable proper compare "
+                    "of the index expressions: "
+                    "https://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#operator-classes",  # noqa: E501
+                )
+            else:
+                msg.append(f"expression #{pos} {r_compile!r} to {m_compile!r}")
+
+        m_options = self._dialect_options(metadata_index)
+        r_options = self._dialect_options(reflected_index)
+        if m_options != r_options:
+            msg.extend(f"options {r_options} to {m_options}")
+
+        if msg:
+            return ComparisonResult.Different(msg)
+        elif skip:
+            # if there are other changes detected, don't skip the index
+            return ComparisonResult.Skip(skip)
+        else:
+            return ComparisonResult.Equal()
+
+    def compare_unique_constraint(
+        self,
+        metadata_constraint: UniqueConstraint,
+        reflected_constraint: UniqueConstraint,
+    ) -> ComparisonResult:
+        metadata_tup = self._create_metadata_constraint_sig(
+            metadata_constraint
+        )
+        reflected_tup = self._create_reflected_constraint_sig(
+            reflected_constraint
+        )
+
+        meta_sig = metadata_tup.unnamed
+        conn_sig = reflected_tup.unnamed
+        if conn_sig != meta_sig:
+            return ComparisonResult.Different(
+                f"expression {conn_sig} to {meta_sig}"
+            )
+
+        metadata_do = self._dialect_options(metadata_tup.const)
+        conn_do = self._dialect_options(reflected_tup.const)
+        if metadata_do != conn_do:
+            return ComparisonResult.Different(
+                f"expression {conn_do} to {metadata_do}"
+            )
+
+        return ComparisonResult.Equal()
+
+    def adjust_reflected_dialect_options(
+        self, reflected_options: Dict[str, Any], kind: str
+    ) -> Dict[str, Any]:
+        options: Dict[str, Any]
+        options = reflected_options.get("dialect_options", {}).copy()
+        if not options.get("postgresql_include"):
+            options.pop("postgresql_include", None)
+        return options
+
+    def _compile_element(self, element: Union[ClauseElement, str]) -> str:
+        if isinstance(element, str):
+            return element
+        return element.compile(
+            dialect=self.dialect,
+            compile_kwargs={"literal_binds": True, "include_table": False},
+        ).string
+
+    def render_ddl_sql_expr(
+        self,
+        expr: ClauseElement,
+        is_server_default: bool = False,
+        is_index: bool = False,
+        **kw: Any,
+    ) -> str:
+        """Render a SQL expression that is typically a server default,
+        index expression, etc.
+
+        """
+
+        # apply self_group to index expressions;
+        # see https://github.com/sqlalchemy/sqlalchemy/blob/
+        # 82fa95cfce070fab401d020c6e6e4a6a96cc2578/
+        # lib/sqlalchemy/dialects/postgresql/base.py#L2261
+        if is_index and not isinstance(expr, ColumnClause):
+            expr = expr.self_group()
+
+        return super().render_ddl_sql_expr(
+            expr, is_server_default=is_server_default, is_index=is_index, **kw
+        )
+
+    def render_type(
+        self, type_: TypeEngine, autogen_context: AutogenContext
+    ) -> Union[str, Literal[False]]:
+        mod = type(type_).__module__
+        if not mod.startswith("sqlalchemy.dialects.postgresql"):
+            return False
+
+        if hasattr(self, "_render_%s_type" % type_.__visit_name__):
+            meth = getattr(self, "_render_%s_type" % type_.__visit_name__)
+            return meth(type_, autogen_context)
+
+        return False
+
+    def _render_HSTORE_type(
+        self, type_: HSTORE, autogen_context: AutogenContext
+    ) -> str:
+        return cast(
+            str,
+            render._render_type_w_subtype(
+                type_, autogen_context, "text_type", r"(.+?\(.*text_type=)"
+            ),
+        )
+
+    def _render_ARRAY_type(
+        self, type_: ARRAY, autogen_context: AutogenContext
+    ) -> str:
+        return cast(
+            str,
+            render._render_type_w_subtype(
+                type_, autogen_context, "item_type", r"(.+?\()"
+            ),
+        )
+
+    def _render_JSON_type(
+        self, type_: JSON, autogen_context: AutogenContext
+    ) -> str:
+        return cast(
+            str,
+            render._render_type_w_subtype(
+                type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)"
+            ),
+        )
+
+    def _render_JSONB_type(
+        self, type_: JSONB, autogen_context: AutogenContext
+    ) -> str:
+        return cast(
+            str,
+            render._render_type_w_subtype(
+                type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)"
+            ),
+        )
+
+
+class PostgresqlColumnType(AlterColumn):
+    def __init__(
+        self, name: str, column_name: str, type_: TypeEngine, **kw
+    ) -> None:
+        using = kw.pop("using", None)
+        super().__init__(name, column_name, **kw)
+        self.type_ = sqltypes.to_instance(type_)
+        self.using = using
+
+
+@compiles(RenameTable, "postgresql")
+def visit_rename_table(
+    element: RenameTable, compiler: PGDDLCompiler, **kw
+) -> str:
+    return "%s RENAME TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, None),
+    )
+
+
+@compiles(PostgresqlColumnType, "postgresql")
+def visit_column_type(
+    element: PostgresqlColumnType, compiler: PGDDLCompiler, **kw
+) -> str:
+    return "%s %s %s %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+        "TYPE %s" % format_type(compiler, element.type_),
+        "USING %s" % element.using if element.using else "",
+    )
+
+
+@compiles(ColumnComment, "postgresql")
+def visit_column_comment(
+    element: ColumnComment, compiler: PGDDLCompiler, **kw
+) -> str:
+    ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}"
+    comment = (
+        compiler.sql_compiler.render_literal_value(
+            element.comment, sqltypes.String()
+        )
+        if element.comment is not None
+        else "NULL"
+    )
+
+    return ddl.format(
+        table_name=format_table_name(
+            compiler, element.table_name, element.schema
+        ),
+        column_name=format_column_name(compiler, element.column_name),
+        comment=comment,
+    )
+
+
+@compiles(IdentityColumnDefault, "postgresql")
+def visit_identity_column(
+    element: IdentityColumnDefault, compiler: PGDDLCompiler, **kw
+):
+    text = "%s %s " % (
+        alter_table(compiler, element.table_name, element.schema),
+        alter_column(compiler, element.column_name),
+    )
+    if element.default is None:
+        # drop identity
+        text += "DROP IDENTITY"
+        return text
+    elif element.existing_server_default is None:
+        # add identity options
+        text += "ADD "
+        text += compiler.visit_identity_column(element.default)
+        return text
+    else:
+        # alter identity
+        diff, _, _ = element.impl._compare_identity_default(
+            element.default, element.existing_server_default
+        )
+        identity = element.default
+        for attr in sorted(diff):
+            if attr == "always":
+                text += "SET GENERATED %s " % (
+                    "ALWAYS" if identity.always else "BY DEFAULT"
+                )
+            else:
+                text += "SET %s " % compiler.get_identity_options(
+                    sqla_compat.Identity(**{attr: getattr(identity, attr)})
+                )
+        return text
+
+
+@Operations.register_operation("create_exclude_constraint")
+@BatchOperations.register_operation(
+    "create_exclude_constraint", "batch_create_exclude_constraint"
+)
+@ops.AddConstraintOp.register_add_constraint("exclude_constraint")
+class CreateExcludeConstraintOp(ops.AddConstraintOp):
+    """Represent a create exclude constraint operation."""
+
+    constraint_type = "exclude"
+
+    def __init__(
+        self,
+        constraint_name: sqla_compat._ConstraintName,
+        table_name: Union[str, quoted_name],
+        elements: Union[
+            Sequence[Tuple[str, str]],
+            Sequence[Tuple[ColumnClause[Any], str]],
+        ],
+        where: Optional[Union[ColumnElement[bool], str]] = None,
+        schema: Optional[str] = None,
+        _orig_constraint: Optional[ExcludeConstraint] = None,
+        **kw,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.elements = elements
+        self.where = where
+        self.schema = schema
+        self._orig_constraint = _orig_constraint
+        self.kw = kw
+
+    @classmethod
+    def from_constraint(  # type:ignore[override]
+        cls, constraint: ExcludeConstraint
+    ) -> CreateExcludeConstraintOp:
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+        return cls(
+            constraint.name,
+            constraint_table.name,
+            [  # type: ignore
+                (expr, op) for expr, name, op in constraint._render_exprs
+            ],
+            where=cast("ColumnElement[bool] | None", constraint.where),
+            schema=constraint_table.schema,
+            _orig_constraint=constraint,
+            deferrable=constraint.deferrable,
+            initially=constraint.initially,
+            using=constraint.using,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> ExcludeConstraint:
+        if self._orig_constraint is not None:
+            return self._orig_constraint
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        t = schema_obj.table(self.table_name, schema=self.schema)
+        excl = ExcludeConstraint(
+            *self.elements,
+            name=self.constraint_name,
+            where=self.where,
+            **self.kw,
+        )
+        for (
+            expr,
+            name,
+            oper,
+        ) in excl._render_exprs:
+            t.append_column(Column(name, NULLTYPE))
+        t.append_constraint(excl)
+        return excl
+
+    @classmethod
+    def create_exclude_constraint(
+        cls,
+        operations: Operations,
+        constraint_name: str,
+        table_name: str,
+        *elements: Any,
+        **kw: Any,
+    ) -> Optional[Table]:
+        """Issue an alter to create an EXCLUDE constraint using the
+        current migration context.
+
+        .. note::  This method is Postgresql-specific, and additionally
+           requires at least SQLAlchemy 1.0.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_exclude_constraint(
+                "user_excl",
+                "user",
+                ("period", "&&"),
+                ("group", "="),
+                where=("group != 'some group'"),
+            )
+
+        Note that the expressions work the same way as that of
+        the ``ExcludeConstraint`` object itself; if plain strings are
+        passed, quoting rules must be applied manually.
+
+        :param name: Name of the constraint.
+        :param table_name: String name of the source table.
+        :param elements: exclude conditions.
+        :param where: SQL expression or SQL string with optional WHERE
+         clause.
+        :param deferrable: optional bool. If set, emit DEFERRABLE or
+         NOT DEFERRABLE when issuing DDL for this constraint.
+        :param initially: optional string. If set, emit INITIALLY <value>
+         when issuing DDL for this constraint.
+        :param schema: Optional schema name to operate within.
+
+        """
+        op = cls(constraint_name, table_name, elements, **kw)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_exclude_constraint(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        *elements: Any,
+        **kw: Any,
+    ) -> Optional[Table]:
+        """Issue a "create exclude constraint" instruction using the
+        current batch migration context.
+
+        .. note::  This method is Postgresql-specific, and additionally
+           requires at least SQLAlchemy 1.0.
+
+        .. seealso::
+
+            :meth:`.Operations.create_exclude_constraint`
+
+        """
+        kw["schema"] = operations.impl.schema
+        op = cls(constraint_name, operations.impl.table_name, elements, **kw)
+        return operations.invoke(op)
+
+
+@render.renderers.dispatch_for(CreateExcludeConstraintOp)
+def _add_exclude_constraint(
+    autogen_context: AutogenContext, op: CreateExcludeConstraintOp
+) -> str:
+    return _exclude_constraint(op.to_constraint(), autogen_context, alter=True)
+
+
+@render._constraint_renderers.dispatch_for(ExcludeConstraint)
+def _render_inline_exclude_constraint(
+    constraint: ExcludeConstraint,
+    autogen_context: AutogenContext,
+    namespace_metadata: MetaData,
+) -> str:
+    rendered = render._user_defined_render(
+        "exclude", constraint, autogen_context
+    )
+    if rendered is not False:
+        return rendered
+
+    return _exclude_constraint(constraint, autogen_context, False)
+
+
+def _postgresql_autogenerate_prefix(autogen_context: AutogenContext) -> str:
+    imports = autogen_context.imports
+    if imports is not None:
+        imports.add("from sqlalchemy.dialects import postgresql")
+    return "postgresql."
+
+
+def _exclude_constraint(
+    constraint: ExcludeConstraint,
+    autogen_context: AutogenContext,
+    alter: bool,
+) -> str:
+    opts: List[Tuple[str, Union[quoted_name, str, _f_name, None]]] = []
+
+    has_batch = autogen_context._has_batch
+
+    if constraint.deferrable:
+        opts.append(("deferrable", str(constraint.deferrable)))
+    if constraint.initially:
+        opts.append(("initially", str(constraint.initially)))
+    if constraint.using:
+        opts.append(("using", str(constraint.using)))
+    if not has_batch and alter and constraint.table.schema:
+        opts.append(("schema", render._ident(constraint.table.schema)))
+    if not alter and constraint.name:
+        opts.append(
+            ("name", render._render_gen_name(autogen_context, constraint.name))
+        )
+
+    def do_expr_where_opts():
+        args = [
+            "(%s, %r)"
+            % (
+                _render_potential_column(
+                    sqltext,  # type:ignore[arg-type]
+                    autogen_context,
+                ),
+                opstring,
+            )
+            for sqltext, name, opstring in constraint._render_exprs
+        ]
+        if constraint.where is not None:
+            args.append(
+                "where=%s"
+                % render._render_potential_expr(
+                    constraint.where, autogen_context
+                )
+            )
+        args.extend(["%s=%r" % (k, v) for k, v in opts])
+        return args
+
+    if alter:
+        args = [
+            repr(render._render_gen_name(autogen_context, constraint.name))
+        ]
+        if not has_batch:
+            args += [repr(render._ident(constraint.table.name))]
+        args.extend(do_expr_where_opts())
+        return "%(prefix)screate_exclude_constraint(%(args)s)" % {
+            "prefix": render._alembic_autogenerate_prefix(autogen_context),
+            "args": ", ".join(args),
+        }
+    else:
+        args = do_expr_where_opts()
+        return "%(prefix)sExcludeConstraint(%(args)s)" % {
+            "prefix": _postgresql_autogenerate_prefix(autogen_context),
+            "args": ", ".join(args),
+        }
+
+
+def _render_potential_column(
+    value: Union[
+        ColumnClause[Any], Column[Any], TextClause, FunctionElement[Any]
+    ],
+    autogen_context: AutogenContext,
+) -> str:
+    if isinstance(value, ColumnClause):
+        if value.is_literal:
+            # like literal_column("int8range(from, to)") in ExcludeConstraint
+            template = "%(prefix)sliteral_column(%(name)r)"
+        else:
+            template = "%(prefix)scolumn(%(name)r)"
+
+        return template % {
+            "prefix": render._sqlalchemy_autogenerate_prefix(autogen_context),
+            "name": value.name,
+        }
+    else:
+        return render._render_potential_expr(
+            value,
+            autogen_context,
+            wrap_in_text=isinstance(value, (TextClause, FunctionElement)),
+        )
diff --git a/venv/Lib/site-packages/alembic/ddl/sqlite.py b/venv/Lib/site-packages/alembic/ddl/sqlite.py
new file mode 100644
index 0000000000000000000000000000000000000000..762e8ca198a6af4d001afd362ff15ac9c43a2821
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/ddl/sqlite.py
@@ -0,0 +1,225 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import re
+from typing import Any
+from typing import Dict
+from typing import Optional
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import cast
+from sqlalchemy import JSON
+from sqlalchemy import schema
+from sqlalchemy import sql
+
+from .base import alter_table
+from .base import format_table_name
+from .base import RenameTable
+from .impl import DefaultImpl
+from .. import util
+from ..util.sqla_compat import compiles
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine.reflection import Inspector
+    from sqlalchemy.sql.compiler import DDLCompiler
+    from sqlalchemy.sql.elements import Cast
+    from sqlalchemy.sql.elements import ClauseElement
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from ..operations.batch import BatchOperationsImpl
+
+
+class SQLiteImpl(DefaultImpl):
+    __dialect__ = "sqlite"
+
+    transactional_ddl = False
+    """SQLite supports transactional DDL, but pysqlite does not:
+    see http://bugs.python.org/issue10740
+    """
+
+    def requires_recreate_in_batch(
+        self, batch_op: BatchOperationsImpl
+    ) -> bool:
+        """Return True if the given :class:`.BatchOperationsImpl`
+        would need the table to be recreated and copied in order to
+        proceed.
+
+        Normally, only returns True on SQLite when operations other
+        than add_column are present.
+
+        """
+        for op in batch_op.batch:
+            if op[0] == "add_column":
+                col = op[1][1]
+                if isinstance(
+                    col.server_default, schema.DefaultClause
+                ) and isinstance(col.server_default.arg, sql.ClauseElement):
+                    return True
+                elif (
+                    isinstance(col.server_default, util.sqla_compat.Computed)
+                    and col.server_default.persisted
+                ):
+                    return True
+            elif op[0] not in ("create_index", "drop_index"):
+                return True
+        else:
+            return False
+
+    def add_constraint(self, const: Constraint):
+        # attempt to distinguish between an
+        # auto-gen constraint and an explicit one
+        if const._create_rule is None:
+            raise NotImplementedError(
+                "No support for ALTER of constraints in SQLite dialect. "
+                "Please refer to the batch mode feature which allows for "
+                "SQLite migrations using a copy-and-move strategy."
+            )
+        elif const._create_rule(self):
+            util.warn(
+                "Skipping unsupported ALTER for "
+                "creation of implicit constraint. "
+                "Please refer to the batch mode feature which allows for "
+                "SQLite migrations using a copy-and-move strategy."
+            )
+
+    def drop_constraint(self, const: Constraint):
+        if const._create_rule is None:
+            raise NotImplementedError(
+                "No support for ALTER of constraints in SQLite dialect. "
+                "Please refer to the batch mode feature which allows for "
+                "SQLite migrations using a copy-and-move strategy."
+            )
+
+    def compare_server_default(
+        self,
+        inspector_column: Column[Any],
+        metadata_column: Column[Any],
+        rendered_metadata_default: Optional[str],
+        rendered_inspector_default: Optional[str],
+    ) -> bool:
+        if rendered_metadata_default is not None:
+            rendered_metadata_default = re.sub(
+                r"^\((.+)\)$", r"\1", rendered_metadata_default
+            )
+
+            rendered_metadata_default = re.sub(
+                r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default
+            )
+
+        if rendered_inspector_default is not None:
+            rendered_inspector_default = re.sub(
+                r"^\((.+)\)$", r"\1", rendered_inspector_default
+            )
+
+            rendered_inspector_default = re.sub(
+                r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default
+            )
+
+        return rendered_inspector_default != rendered_metadata_default
+
+    def _guess_if_default_is_unparenthesized_sql_expr(
+        self, expr: Optional[str]
+    ) -> bool:
+        """Determine if a server default is a SQL expression or a constant.
+
+        There are too many assertions that expect server defaults to round-trip
+        identically without parentheses added, so we will add parens only in
+        very specific cases.
+
+        """
+        if not expr:
+            return False
+        elif re.match(r"^[0-9\.]$", expr):
+            return False
+        elif re.match(r"^'.+'$", expr):
+            return False
+        elif re.match(r"^\(.+\)$", expr):
+            return False
+        else:
+            return True
+
+    def autogen_column_reflect(
+        self,
+        inspector: Inspector,
+        table: Table,
+        column_info: Dict[str, Any],
+    ) -> None:
+        # SQLite expression defaults require parenthesis when sent
+        # as DDL
+        if self._guess_if_default_is_unparenthesized_sql_expr(
+            column_info.get("default", None)
+        ):
+            column_info["default"] = "(%s)" % (column_info["default"],)
+
+    def render_ddl_sql_expr(
+        self, expr: ClauseElement, is_server_default: bool = False, **kw
+    ) -> str:
+        # SQLite expression defaults require parenthesis when sent
+        # as DDL
+        str_expr = super().render_ddl_sql_expr(
+            expr, is_server_default=is_server_default, **kw
+        )
+
+        if (
+            is_server_default
+            and self._guess_if_default_is_unparenthesized_sql_expr(str_expr)
+        ):
+            str_expr = "(%s)" % (str_expr,)
+        return str_expr
+
+    def cast_for_batch_migrate(
+        self,
+        existing: Column[Any],
+        existing_transfer: Dict[str, Union[TypeEngine, Cast]],
+        new_type: TypeEngine,
+    ) -> None:
+        if (
+            existing.type._type_affinity is not new_type._type_affinity
+            and not isinstance(new_type, JSON)
+        ):
+            existing_transfer["expr"] = cast(
+                existing_transfer["expr"], new_type
+            )
+
+    def correct_for_autogen_constraints(
+        self,
+        conn_unique_constraints,
+        conn_indexes,
+        metadata_unique_constraints,
+        metadata_indexes,
+    ):
+        self._skip_functional_indexes(metadata_indexes, conn_indexes)
+
+
+@compiles(RenameTable, "sqlite")
+def visit_rename_table(
+    element: RenameTable, compiler: DDLCompiler, **kw
+) -> str:
+    return "%s RENAME TO %s" % (
+        alter_table(compiler, element.table_name, element.schema),
+        format_table_name(compiler, element.new_table_name, None),
+    )
+
+
+# @compiles(AddColumn, 'sqlite')
+# def visit_add_column(element, compiler, **kw):
+#    return "%s %s" % (
+#        alter_table(compiler, element.table_name, element.schema),
+#        add_column(compiler, element.column, **kw)
+#    )
+
+
+# def add_column(compiler, column, **kw):
+#    text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw)
+# need to modify SQLAlchemy so that the CHECK associated with a Boolean
+# or Enum gets placed as part of the column constraints, not the Table
+# see ticket 98
+#    for const in column.constraints:
+#        text += compiler.process(AddConstraint(const))
+#    return text
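A minimal sketch of the batch copy-and-move strategy that the add_constraint()/drop_constraint() messages above point to, since direct ALTER of constraints raises NotImplementedError on SQLite; table, column, and constraint names are hypothetical:

    import sqlalchemy as sa
    from alembic import op

    def upgrade() -> None:
        with op.batch_alter_table("user") as batch_op:
            # both changes are applied by recreating the table and copying rows
            batch_op.create_check_constraint("ck_user_age", "age >= 0")
            batch_op.alter_column(
                "name", existing_type=sa.String(50), nullable=False
            )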
diff --git a/venv/Lib/site-packages/alembic/environment.py b/venv/Lib/site-packages/alembic/environment.py
new file mode 100644
index 0000000000000000000000000000000000000000..adfc93eb0c2fdf4e8104faab95bdb4bdd210fbaa
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/environment.py
@@ -0,0 +1 @@
+from .runtime.environment import *  # noqa
diff --git a/venv/Lib/site-packages/alembic/migration.py b/venv/Lib/site-packages/alembic/migration.py
new file mode 100644
index 0000000000000000000000000000000000000000..02626e2cf6d4cbe7f57dc95fce2399ea93df0dbc
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/migration.py
@@ -0,0 +1 @@
+from .runtime.migration import *  # noqa
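The one-line environment.py and migration.py modules above are import-compatibility shims; a minimal sketch of what they preserve, assuming alembic is installed:

    # the legacy import path resolves to the same class as the runtime package
    from alembic.migration import MigrationContext
    from alembic.runtime.migration import MigrationContext as Current

    assert MigrationContext is Current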
diff --git a/venv/Lib/site-packages/alembic/op.py b/venv/Lib/site-packages/alembic/op.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3f5fac0cf5c1e56d44f42051b6d829f7026c86d
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/op.py
@@ -0,0 +1,5 @@
+from .operations.base import Operations
+
+# create proxy functions for
+# each method on the Operations class.
+Operations.create_module_class_proxy(globals(), locals())
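A minimal sketch of what the module-class proxy above enables: a migration script calls module-level functions on alembic.op, and each call forwards to the Operations instance bound to the currently active migration context (the .pyi stub that follows supplies the matching signatures for type checkers). "account" and "nickname" are hypothetical names.

    import sqlalchemy as sa
    from alembic import op

    def upgrade() -> None:
        # proxies to Operations.add_column on the active migration context
        op.add_column("account", sa.Column("nickname", sa.String(64)))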
diff --git a/venv/Lib/site-packages/alembic/op.pyi b/venv/Lib/site-packages/alembic/op.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..83deac1eb0154050362c9411291bcfe8d64e97c3
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/op.pyi
@@ -0,0 +1,1321 @@
+# ### this file stubs are generated by tools/write_pyi.py - do not edit ###
+# ### imports are manually managed
+from __future__ import annotations
+
+from contextlib import contextmanager
+from typing import Any
+from typing import Awaitable
+from typing import Callable
+from typing import Dict
+from typing import Iterator
+from typing import List
+from typing import Literal
+from typing import Mapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import conv
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.expression import TableClause
+    from sqlalchemy.sql.functions import Function
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Computed
+    from sqlalchemy.sql.schema import Identity
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.type_api import TypeEngine
+    from sqlalchemy.util import immutabledict
+
+    from .operations.base import BatchOperations
+    from .operations.ops import AddColumnOp
+    from .operations.ops import AddConstraintOp
+    from .operations.ops import AlterColumnOp
+    from .operations.ops import AlterTableOp
+    from .operations.ops import BulkInsertOp
+    from .operations.ops import CreateIndexOp
+    from .operations.ops import CreateTableCommentOp
+    from .operations.ops import CreateTableOp
+    from .operations.ops import DropColumnOp
+    from .operations.ops import DropConstraintOp
+    from .operations.ops import DropIndexOp
+    from .operations.ops import DropTableCommentOp
+    from .operations.ops import DropTableOp
+    from .operations.ops import ExecuteSQLOp
+    from .operations.ops import MigrateOperation
+    from .runtime.migration import MigrationContext
+    from .util.sqla_compat import _literal_bindparam
+
+_T = TypeVar("_T")
+_C = TypeVar("_C", bound=Callable[..., Any])
+
+### end imports ###
+
+def add_column(
+    table_name: str, column: Column[Any], *, schema: Optional[str] = None
+) -> None:
+    """Issue an "add column" instruction using the current
+    migration context.
+
+    e.g.::
+
+        from alembic import op
+        from sqlalchemy import Column, String
+
+        op.add_column("organization", Column("name", String()))
+
+    The :meth:`.Operations.add_column` method typically corresponds
+    to the SQL command "ALTER TABLE... ADD COLUMN".    Within the scope
+    of this command, the column's name, datatype, nullability,
+    and optional server-generated defaults may be indicated.
+
+    .. note::
+
+        With the exception of NOT NULL constraints or single-column FOREIGN
+        KEY constraints, other kinds of constraints such as PRIMARY KEY,
+        UNIQUE or CHECK constraints **cannot** be generated using this
+        method; for these constraints, refer to operations such as
+        :meth:`.Operations.create_primary_key` and
+        :meth:`.Operations.create_check_constraint`. In particular, the
+        following :class:`~sqlalchemy.schema.Column` parameters are
+        **ignored**:
+
+        * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
+          typically do not support an ALTER operation that can add
+          individual columns one at a time to an existing primary key
+          constraint, therefore it's less ambiguous to use the
+          :meth:`.Operations.create_primary_key` method, which assumes no
+          existing primary key constraint is present.
+        * :paramref:`~sqlalchemy.schema.Column.unique` - use the
+          :meth:`.Operations.create_unique_constraint` method
+        * :paramref:`~sqlalchemy.schema.Column.index` - use the
+          :meth:`.Operations.create_index` method
+
+
+    The provided :class:`~sqlalchemy.schema.Column` object may include a
+    :class:`~sqlalchemy.schema.ForeignKey` constraint directive,
+    referencing a remote table name. For this specific type of constraint,
+    Alembic will automatically emit a second ALTER statement in order to
+    add the single-column FOREIGN KEY constraint separately::
+
+        from alembic import op
+        from sqlalchemy import Column, INTEGER, ForeignKey
+
+        op.add_column(
+            "organization",
+            Column("account_id", INTEGER, ForeignKey("accounts.id")),
+        )
+
+    The column argument passed to :meth:`.Operations.add_column` is a
+    :class:`~sqlalchemy.schema.Column` construct, used in the same way it's
+    used in SQLAlchemy. In particular, values or functions to be indicated
+    as producing the column's default value on the database side are
+    specified using the ``server_default`` parameter, and not ``default``
+    which only specifies Python-side defaults::
+
+        from alembic import op
+        from sqlalchemy import Column, TIMESTAMP, func
+
+        # specify "DEFAULT NOW" along with the column add
+        op.add_column(
+            "account",
+            Column("timestamp", TIMESTAMP, server_default=func.now()),
+        )
+
+    :param table_name: String name of the parent table.
+    :param column: a :class:`sqlalchemy.schema.Column` object
+     representing the new column.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def alter_column(
+    table_name: str,
+    column_name: str,
+    *,
+    nullable: Optional[bool] = None,
+    comment: Union[str, Literal[False], None] = False,
+    server_default: Any = False,
+    new_column_name: Optional[str] = None,
+    type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
+    existing_type: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
+    existing_server_default: Union[
+        str, bool, Identity, Computed, None
+    ] = False,
+    existing_nullable: Optional[bool] = None,
+    existing_comment: Optional[str] = None,
+    schema: Optional[str] = None,
+    **kw: Any,
+) -> None:
+    r"""Issue an "alter column" instruction using the
+    current migration context.
+
+    Generally, only that aspect of the column which
+    is being changed, i.e. name, type, nullability,
+    default, needs to be specified.  Multiple changes
+    can also be specified at once and the backend should
+    "do the right thing", emitting each change either
+    separately or together as the backend allows.
+
+    MySQL has special requirements here, since MySQL
+    cannot ALTER a column without a full specification.
+    When producing MySQL-compatible migration files,
+    it is recommended that the ``existing_type``,
+    ``existing_server_default``, and ``existing_nullable``
+    parameters be present, if not being altered.
+
+    Type changes which are against the SQLAlchemy
+    "schema" types :class:`~sqlalchemy.types.Boolean`
+    and  :class:`~sqlalchemy.types.Enum` may also
+    add or drop constraints which accompany those
+    types on backends that don't support them natively.
+    The ``existing_type`` argument is
+    used in this case to identify and remove a previous
+    constraint that was bound to the type object.
+
+    :param table_name: string name of the target table.
+    :param column_name: string name of the target column,
+     as it exists before the operation begins.
+    :param nullable: Optional; specify ``True`` or ``False``
+     to alter the column's nullability.
+    :param server_default: Optional; specify a string
+     SQL expression, :func:`~sqlalchemy.sql.expression.text`,
+     or :class:`~sqlalchemy.schema.DefaultClause` to indicate
+     an alteration to the column's default value.
+     Set to ``None`` to have the default removed.
+    :param comment: optional string text of a new comment to add to the
+     column.
+    :param new_column_name: Optional; specify a string name here to
+     indicate the new name within a column rename operation.
+    :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
+     type object to specify a change to the column's type.
+     For SQLAlchemy types that also indicate a constraint (i.e.
+     :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
+     the constraint is also generated.
+    :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
+     currently understood by the MySQL dialect.
+    :param existing_type: Optional; a
+     :class:`~sqlalchemy.types.TypeEngine`
+     type object to specify the previous type.   This
+     is required for all MySQL column alter operations that
+     don't otherwise specify a new type, as well as for
+     when nullability is being changed on a SQL Server
+     column.  It is also used if the type is a so-called
+     SQLAlchemy "schema" type which may define a constraint (i.e.
+     :class:`~sqlalchemy.types.Boolean`,
+     :class:`~sqlalchemy.types.Enum`),
+     so that the constraint can be dropped.
+    :param existing_server_default: Optional; The existing
+     default value of the column.   Required on MySQL if
+     an existing default is not being changed; else MySQL
+     removes the default.
+    :param existing_nullable: Optional; the existing nullability
+     of the column.  Required on MySQL if the existing nullability
+     is not being changed; else MySQL sets this to NULL.
+    :param existing_autoincrement: Optional; the existing autoincrement
+     of the column.  Used for MySQL's system of altering a column
+     that specifies ``AUTO_INCREMENT``.
+    :param existing_comment: string text of the existing comment on the
+     column to be maintained.  Required on MySQL if the existing comment
+     on the column is not being changed.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param postgresql_using: String argument which will indicate a
+     SQL expression to render within the PostgreSQL-specific USING clause
+     within ALTER COLUMN.    This string is taken directly as raw SQL which
+     must explicitly include any necessary quoting or escaping of tokens
+     within the expression.
+
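+    For example, a MySQL-compatible alteration that renames a column and
+    widens its type might look like the following sketch (the ``user``
+    table and its columns are hypothetical)::
+
+        from alembic import op
+        import sqlalchemy as sa
+
+        op.alter_column(
+            "user",
+            "nickname",
+            new_column_name="handle",
+            type_=sa.String(100),
+            existing_type=sa.String(50),
+            existing_nullable=False,
+        )
+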
+    """
+
+@contextmanager
+def batch_alter_table(
+    table_name: str,
+    schema: Optional[str] = None,
+    recreate: Literal["auto", "always", "never"] = "auto",
+    partial_reordering: Optional[Tuple[Any, ...]] = None,
+    copy_from: Optional[Table] = None,
+    table_args: Tuple[Any, ...] = (),
+    table_kwargs: Mapping[str, Any] = immutabledict({}),
+    reflect_args: Tuple[Any, ...] = (),
+    reflect_kwargs: Mapping[str, Any] = immutabledict({}),
+    naming_convention: Optional[Dict[str, str]] = None,
+) -> Iterator[BatchOperations]:
+    """Invoke a series of per-table migrations in batch.
+
+    Batch mode allows a series of operations specific to a table
+    to be syntactically grouped together, and allows for alternate
+    modes of table migration, in particular the "recreate" style of
+    migration required by SQLite.
+
+    "recreate" style is as follows:
+
+    1. A new table is created with the new specification, based on the
+       migration directives within the batch, using a temporary name.
+
+    2. The data is copied from the existing table to the new table.
+
+    3. The existing table is dropped.
+
+    4. The new table is renamed to the existing table name.
+
+    The directive by default will only use "recreate" style on the
+    SQLite backend, and only if directives are present which require
+    this form, e.g. anything other than ``add_column()``.   The batch
+    operation on other backends will proceed using standard ALTER TABLE
+    operations.
+
+    The method is used as a context manager, which returns an instance
+    of :class:`.BatchOperations`; this object is the same as
+    :class:`.Operations` except that table names and schema names
+    are omitted.  E.g.::
+
+        with op.batch_alter_table("some_table") as batch_op:
+            batch_op.add_column(Column("foo", Integer))
+            batch_op.drop_column("bar")
+
+    The operations within the context manager are invoked at once
+    when the context is ended.   When run against SQLite, if the
+    migrations include operations not supported by SQLite's ALTER TABLE,
+    the entire table will be copied to a new one with the new
+    specification, moving all data across as well.
+
+    The copy operation by default uses reflection to retrieve the current
+    structure of the table, and therefore :meth:`.batch_alter_table`
+    in this mode requires that the migration is run in "online" mode.
+    The ``copy_from`` parameter may be passed which refers to an existing
+    :class:`.Table` object, which will bypass this reflection step.
+
+    .. note::  The table copy operation will currently not copy
+       CHECK constraints, and may not copy UNIQUE constraints that are
+       unnamed, as is possible on SQLite.   See the section
+       :ref:`sqlite_batch_constraints` for workarounds.
+
+    :param table_name: name of table
+    :param schema: optional schema name.
+    :param recreate: under what circumstances the table should be
+     recreated. At its default of ``"auto"``, the SQLite dialect will
+     recreate the table if any operations other than ``add_column()``,
+     ``create_index()``, or ``drop_index()`` are
+     present. Other options include ``"always"`` and ``"never"``.
+    :param copy_from: optional :class:`~sqlalchemy.schema.Table` object
+     that will act as the structure of the table being copied.  If omitted,
+     table reflection is used to retrieve the structure of the table.
+
+     .. seealso::
+
+        :ref:`batch_offline_mode`
+
+        :paramref:`~.Operations.batch_alter_table.reflect_args`
+
+        :paramref:`~.Operations.batch_alter_table.reflect_kwargs`
+
+    :param reflect_args: a sequence of additional positional arguments that
+     will be applied to the table structure being reflected / copied;
+     this may be used to pass column and constraint overrides to the
+     table that will be reflected, in lieu of passing the whole
+     :class:`~sqlalchemy.schema.Table` using
+     :paramref:`~.Operations.batch_alter_table.copy_from`.
+    :param reflect_kwargs: a dictionary of additional keyword arguments
+     that will be applied to the table structure being copied; this may be
+     used to pass additional table and reflection options to the table that
+     will be reflected, in lieu of passing the whole
+     :class:`~sqlalchemy.schema.Table` using
+     :paramref:`~.Operations.batch_alter_table.copy_from`.
+    :param table_args: a sequence of additional positional arguments that
+     will be applied to the new :class:`~sqlalchemy.schema.Table` when
+     created, in addition to those copied from the source table.
+     This may be used to provide additional constraints such as CHECK
+     constraints that may not be reflected.
+    :param table_kwargs: a dictionary of additional keyword arguments
+     that will be applied to the new :class:`~sqlalchemy.schema.Table`
+     when created, in addition to those copied from the source table.
+     This may be used to provide for additional table options that may
+     not be reflected.
+    :param naming_convention: a naming convention dictionary of the form
+     described at :ref:`autogen_naming_conventions` which will be applied
+     to the :class:`~sqlalchemy.schema.MetaData` during the reflection
+     process.  This is typically required if one wants to drop SQLite
+     constraints, as these constraints will not have names when
+     reflected on this backend.  Requires SQLAlchemy **0.9.4** or greater.
+
+     .. seealso::
+
+        :ref:`dropping_sqlite_foreign_keys`
+
+    :param partial_reordering: a list of tuples, each suggesting a desired
+     ordering of two or more columns in the newly created table.  Requires
+     that :paramref:`.batch_alter_table.recreate` is set to ``"always"``.
+     Examples, given a table with columns "a", "b", "c", and "d":
+
+     Specify the order of all columns::
+
+        with op.batch_alter_table(
+            "some_table",
+            recreate="always",
+            partial_reordering=[("c", "d", "a", "b")],
+        ) as batch_op:
+            pass
+
+     Ensure "d" appears before "c", and "b", appears before "a"::
+
+        with op.batch_alter_table(
+            "some_table",
+            recreate="always",
+            partial_reordering=[("d", "c"), ("b", "a")],
+        ) as batch_op:
+            pass
+
+     The ordering of columns not included in the partial_reordering
+     set is undefined.   Therefore it is best to specify the complete
+     ordering of all columns.
+
+    .. note:: batch mode requires SQLAlchemy 0.8 or above.
+
+    .. seealso::
+
+        :ref:`batch_migrations`
+
+    """
+
+def bulk_insert(
+    table: Union[Table, TableClause],
+    rows: List[Dict[str, Any]],
+    *,
+    multiinsert: bool = True,
+) -> None:
+    """Issue a "bulk insert" operation using the current
+    migration context.
+
+    This provides a means of representing an INSERT of multiple rows
+    which works equally well in the context of executing on a live
+    connection as well as that of generating a SQL script.   In the
+    case of a SQL script, the values are rendered inline into the
+    statement.
+
+    e.g.::
+
+        from alembic import op
+        from datetime import date
+        from sqlalchemy.sql import table, column
+        from sqlalchemy import String, Integer, Date
+
+        # Create an ad-hoc table to use for the insert statement.
+        accounts_table = table(
+            "account",
+            column("id", Integer),
+            column("name", String),
+            column("create_date", Date),
+        )
+
+        op.bulk_insert(
+            accounts_table,
+            [
+                {
+                    "id": 1,
+                    "name": "John Smith",
+                    "create_date": date(2010, 10, 5),
+                },
+                {
+                    "id": 2,
+                    "name": "Ed Williams",
+                    "create_date": date(2007, 5, 27),
+                },
+                {
+                    "id": 3,
+                    "name": "Wendy Jones",
+                    "create_date": date(2008, 8, 15),
+                },
+            ],
+        )
+
+    When using --sql mode, some datatypes may not render inline
+    automatically, such as dates and other special types.   When this
+    issue is present, :meth:`.Operations.inline_literal` may be used::
+
+        op.bulk_insert(
+            accounts_table,
+            [
+                {
+                    "id": 1,
+                    "name": "John Smith",
+                    "create_date": op.inline_literal("2010-10-05"),
+                },
+                {
+                    "id": 2,
+                    "name": "Ed Williams",
+                    "create_date": op.inline_literal("2007-05-27"),
+                },
+                {
+                    "id": 3,
+                    "name": "Wendy Jones",
+                    "create_date": op.inline_literal("2008-08-15"),
+                },
+            ],
+            multiinsert=False,
+        )
+
+    When using :meth:`.Operations.inline_literal` in conjunction with
+    :meth:`.Operations.bulk_insert`, in order for the statement to work
+    in "online" (e.g. non --sql) mode, the
+    :paramref:`~.Operations.bulk_insert.multiinsert`
+    flag should be set to ``False``, which will have the effect of
+    individual INSERT statements being emitted to the database, each
+    with a distinct VALUES clause, so that the "inline" values can
+    still be rendered, rather than attempting to pass the values
+    as bound parameters.
+
+    :param table: a table object which represents the target of the INSERT.
+
+    :param rows: a list of dictionaries indicating rows.
+
+    :param multiinsert: when at its default of True and --sql mode is not
+       enabled, the INSERT statement will be executed using
+       "executemany()" style, where all elements in the list of
+       dictionaries are passed as bound parameters in a single
+       list.   Setting this to False results in individual INSERT
+       statements being emitted per parameter set, and is needed
+       in those cases where non-literal values are present in the
+       parameter sets.
+
+    """
+
+def create_check_constraint(
+    constraint_name: Optional[str],
+    table_name: str,
+    condition: Union[str, ColumnElement[bool], TextClause],
+    *,
+    schema: Optional[str] = None,
+    **kw: Any,
+) -> None:
+    """Issue a "create check constraint" instruction using the
+    current migration context.
+
+    e.g.::
+
+        from alembic import op
+        from sqlalchemy.sql import column, func
+
+        op.create_check_constraint(
+            "ck_user_name_len",
+            "user",
+            func.len(column("name")) > 5,
+        )
+
+    CHECK constraints are usually against a SQL expression, so ad-hoc
+    table metadata is usually needed.   The function will convert the given
+    arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
+    to an anonymous table in order to emit the CREATE statement.
+
+    :param constraint_name: Name of the check constraint.  The name is
+     necessary so that an ALTER statement can be emitted.  For setups that
+     use an automated naming scheme such as that described at
+     :ref:`sqla:constraint_naming_conventions`,
+     ``name`` here can be ``None``, as the event listener will
+     apply the name to the constraint object when it is associated
+     with the table.
+    :param table_name: String name of the source table.
+    :param condition: SQL expression that's the condition of the
+     constraint. Can be a string or SQLAlchemy expression language
+     structure.
+    :param deferrable: optional bool. If set, emit DEFERRABLE or
+     NOT DEFERRABLE when issuing DDL for this constraint.
+    :param initially: optional string. If set, emit INITIALLY <value>
+     when issuing DDL for this constraint.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def create_exclude_constraint(
+    constraint_name: str, table_name: str, *elements: Any, **kw: Any
+) -> Optional[Table]:
+    """Issue an alter to create an EXCLUDE constraint using the
+    current migration context.
+
+    .. note::  This method is PostgreSQL specific, and additionally
+       requires at least SQLAlchemy 1.0.
+
+    e.g.::
+
+        from alembic import op
+
+        op.create_exclude_constraint(
+            "user_excl",
+            "user",
+            ("period", "&&"),
+            ("group", "="),
+            where=("group != 'some group'"),
+        )
+
+    Note that the expressions work the same way as that of
+    the ``ExcludeConstraint`` object itself; if plain strings are
+    passed, quoting rules must be applied manually.
+
+    :param constraint_name: Name of the constraint.
+    :param table_name: String name of the source table.
+    :param elements: exclude conditions.
+    :param where: SQL expression or SQL string with optional WHERE
+     clause.
+    :param deferrable: optional bool. If set, emit DEFERRABLE or
+     NOT DEFERRABLE when issuing DDL for this constraint.
+    :param initially: optional string. If set, emit INITIALLY <value>
+     when issuing DDL for this constraint.
+    :param schema: Optional schema name to operate within.
+
+    """
+
+def create_foreign_key(
+    constraint_name: Optional[str],
+    source_table: str,
+    referent_table: str,
+    local_cols: List[str],
+    remote_cols: List[str],
+    *,
+    onupdate: Optional[str] = None,
+    ondelete: Optional[str] = None,
+    deferrable: Optional[bool] = None,
+    initially: Optional[str] = None,
+    match: Optional[str] = None,
+    source_schema: Optional[str] = None,
+    referent_schema: Optional[str] = None,
+    **dialect_kw: Any,
+) -> None:
+    """Issue a "create foreign key" instruction using the
+    current migration context.
+
+    e.g.::
+
+        from alembic import op
+
+        op.create_foreign_key(
+            "fk_user_address",
+            "address",
+            "user",
+            ["user_id"],
+            ["id"],
+        )
+
+    This internally generates a :class:`~sqlalchemy.schema.Table` object
+    containing the necessary columns, then generates a new
+    :class:`~sqlalchemy.schema.ForeignKeyConstraint`
+    object which it then associates with the
+    :class:`~sqlalchemy.schema.Table`.
+    Any event listeners associated with this action will be fired
+    off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+    construct is ultimately used to generate the ALTER statement.
+
+    :param constraint_name: Name of the foreign key constraint.  The name
+     is necessary so that an ALTER statement can be emitted.  For setups
+     that use an automated naming scheme such as that described at
+     :ref:`sqla:constraint_naming_conventions`,
+     ``name`` here can be ``None``, as the event listener will
+     apply the name to the constraint object when it is associated
+     with the table.
+    :param source_table: String name of the source table.
+    :param referent_table: String name of the destination table.
+    :param local_cols: a list of string column names in the
+     source table.
+    :param remote_cols: a list of string column names in the
+     remote table.
+    :param onupdate: Optional string. If set, emit ON UPDATE <value> when
+     issuing DDL for this constraint. Typical values include CASCADE,
+     SET NULL and RESTRICT.
+    :param ondelete: Optional string. If set, emit ON DELETE <value> when
+     issuing DDL for this constraint. Typical values include CASCADE,
+     SET NULL and RESTRICT.
+    :param deferrable: optional bool. If set, emit DEFERRABLE or NOT
+     DEFERRABLE when issuing DDL for this constraint.
+    :param source_schema: Optional schema name of the source table.
+    :param referent_schema: Optional schema name of the destination table.
+
+    """
+
+def create_index(
+    index_name: Optional[str],
+    table_name: str,
+    columns: Sequence[Union[str, TextClause, Function[Any]]],
+    *,
+    schema: Optional[str] = None,
+    unique: bool = False,
+    if_not_exists: Optional[bool] = None,
+    **kw: Any,
+) -> None:
+    r"""Issue a "create index" instruction using the current
+    migration context.
+
+    e.g.::
+
+        from alembic import op
+
+        op.create_index("ik_test", "t1", ["foo", "bar"])
+
+    Functional indexes can be produced by using the
+    :func:`sqlalchemy.sql.expression.text` construct::
+
+        from alembic import op
+        from sqlalchemy import text
+
+        op.create_index("ik_test", "t1", [text("lower(foo)")])
+
+    :param index_name: name of the index.
+    :param table_name: name of the owning table.
+    :param columns: a list consisting of string column names and/or
+     :func:`~sqlalchemy.sql.expression.text` constructs.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param unique: If True, create a unique index.
+
+    :param quote: Force quoting of this column's name on or off,
+     corresponding to ``True`` or ``False``. When left at its default
+     of ``None``, the column identifier will be quoted according to
+     whether the name is case sensitive (identifiers with at least one
+     upper case character are treated as case sensitive), or if it's a
+     reserved word. This flag is only needed to force quoting of a
+     reserved word which is not known by the SQLAlchemy dialect.
+
+    :param if_not_exists: If True, adds IF NOT EXISTS operator when
+     creating the new index.
+
+     .. versionadded:: 1.12.0
+
+    :param \**kw: Additional keyword arguments not mentioned above are
+     dialect specific, and passed in the form
+     ``<dialectname>_<argname>``.
+     See the documentation regarding an individual dialect at
+     :ref:`dialect_toplevel` for detail on documented arguments.
+
+    """
+
+def create_primary_key(
+    constraint_name: Optional[str],
+    table_name: str,
+    columns: List[str],
+    *,
+    schema: Optional[str] = None,
+) -> None:
+    """Issue a "create primary key" instruction using the current
+    migration context.
+
+    e.g.::
+
+        from alembic import op
+
+        op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
+
+    This internally generates a :class:`~sqlalchemy.schema.Table` object
+    containing the necessary columns, then generates a new
+    :class:`~sqlalchemy.schema.PrimaryKeyConstraint`
+    object which it then associates with the
+    :class:`~sqlalchemy.schema.Table`.
+    Any event listeners associated with this action will be fired
+    off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+    construct is ultimately used to generate the ALTER statement.
+
+    :param constraint_name: Name of the primary key constraint.  The name
+     is necessary so that an ALTER statement can be emitted.  For setups
+     that use an automated naming scheme such as that described at
+     :ref:`sqla:constraint_naming_conventions`
+     ``name`` here can be ``None``, as the event listener will
+     apply the name to the constraint object when it is associated
+     with the table.
+    :param table_name: String name of the target table.
+    :param columns: a list of string column names to be applied to the
+     primary key constraint.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def create_table(table_name: str, *columns: SchemaItem, **kw: Any) -> Table:
+    r"""Issue a "create table" instruction using the current migration
+    context.
+
+    This directive receives an argument list similar to that of the
+    traditional :class:`sqlalchemy.schema.Table` construct, but without the
+    metadata::
+
+        from sqlalchemy import (
+            INTEGER, VARCHAR, NVARCHAR, TIMESTAMP, Column, func
+        )
+        from alembic import op
+
+        op.create_table(
+            "account",
+            Column("id", INTEGER, primary_key=True),
+            Column("name", VARCHAR(50), nullable=False),
+            Column("description", NVARCHAR(200)),
+            Column("timestamp", TIMESTAMP, server_default=func.now()),
+        )
+
+    Note that :meth:`.create_table` accepts
+    :class:`~sqlalchemy.schema.Column`
+    constructs directly from the SQLAlchemy library.  In particular,
+    default values to be created on the database side are
+    specified using the ``server_default`` parameter, and not
+    ``default`` which only specifies Python-side defaults::
+
+        from alembic import op
+        from sqlalchemy import Column, TIMESTAMP, func
+
+        # specify "DEFAULT NOW" along with the "timestamp" column
+        op.create_table(
+            "account",
+            Column("id", INTEGER, primary_key=True),
+            Column("timestamp", TIMESTAMP, server_default=func.now()),
+        )
+
+    The function also returns a newly created
+    :class:`~sqlalchemy.schema.Table` object, corresponding to the table
+    specification given, which is suitable for
+    immediate SQL operations, in particular
+    :meth:`.Operations.bulk_insert`::
+
+        from sqlalchemy import (
+            INTEGER, VARCHAR, NVARCHAR, TIMESTAMP, Column, func
+        )
+        from alembic import op
+
+        account_table = op.create_table(
+            "account",
+            Column("id", INTEGER, primary_key=True),
+            Column("name", VARCHAR(50), nullable=False),
+            Column("description", NVARCHAR(200)),
+            Column("timestamp", TIMESTAMP, server_default=func.now()),
+        )
+
+        op.bulk_insert(
+            account_table,
+            [
+                {"name": "A1", "description": "account 1"},
+                {"name": "A2", "description": "account 2"},
+            ],
+        )
+
+    :param table_name: Name of the table
+    :param \*columns: collection of :class:`~sqlalchemy.schema.Column`
+     objects within the table, as well as optional
+     :class:`~sqlalchemy.schema.Constraint` objects and
+     :class:`~sqlalchemy.schema.Index` objects.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param \**kw: Other keyword arguments are passed to the underlying
+     :class:`sqlalchemy.schema.Table` object created for the command.
+
+    :return: the :class:`~sqlalchemy.schema.Table` object corresponding
+     to the parameters given.
+
+    """
+
+def create_table_comment(
+    table_name: str,
+    comment: Optional[str],
+    *,
+    existing_comment: Optional[str] = None,
+    schema: Optional[str] = None,
+) -> None:
+    """Emit a COMMENT ON operation to set the comment for a table.
+
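+    For example (a sketch; the ``account`` table and comment text are
+    hypothetical)::
+
+        from alembic import op
+
+        op.create_table_comment(
+            "account",
+            "This table stores user account records.",
+        )
+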
+    :param table_name: string name of the target table.
+    :param comment: string value of the comment being registered against
+     the specified table.
+    :param existing_comment: String value of a comment
+     already registered on the specified table, used within autogenerate
+     so that the operation is reversible, but not required for direct
+     use.
+
+    .. seealso::
+
+        :meth:`.Operations.drop_table_comment`
+
+        :paramref:`.Operations.alter_column.comment`
+
+    """
+
+def create_unique_constraint(
+    constraint_name: Optional[str],
+    table_name: str,
+    columns: Sequence[str],
+    *,
+    schema: Optional[str] = None,
+    **kw: Any,
+) -> Any:
+    """Issue a "create unique constraint" instruction using the
+    current migration context.
+
+    e.g.::
+
+        from alembic import op
+        op.create_unique_constraint("uq_user_name", "user", ["name"])
+
+    This internally generates a :class:`~sqlalchemy.schema.Table` object
+    containing the necessary columns, then generates a new
+    :class:`~sqlalchemy.schema.UniqueConstraint`
+    object which it then associates with the
+    :class:`~sqlalchemy.schema.Table`.
+    Any event listeners associated with this action will be fired
+    off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+    construct is ultimately used to generate the ALTER statement.
+
+    :param constraint_name: Name of the unique constraint.  The name is
+     necessary so that an ALTER statement can be emitted.  For setups that
+     use an automated naming scheme such as that described at
+     :ref:`sqla:constraint_naming_conventions`,
+     ``name`` here can be ``None``, as the event listener will
+     apply the name to the constraint object when it is associated
+     with the table.
+    :param table_name: String name of the source table.
+    :param columns: a list of string column names in the
+     source table.
+    :param deferrable: optional bool. If set, emit DEFERRABLE or
+     NOT DEFERRABLE when issuing DDL for this constraint.
+    :param initially: optional string. If set, emit INITIALLY <value>
+     when issuing DDL for this constraint.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def drop_column(
+    table_name: str,
+    column_name: str,
+    *,
+    schema: Optional[str] = None,
+    **kw: Any,
+) -> None:
+    """Issue a "drop column" instruction using the current
+    migration context.
+
+    e.g.::
+
+        drop_column("organization", "account_id")
+
+    :param table_name: name of table
+    :param column_name: name of column
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param mssql_drop_check: Optional boolean.  When ``True``, on
+     Microsoft SQL Server only, first
+     drop the CHECK constraint on the column using a
+     SQL-script-compatible
+     block that selects into a @variable from sys.check_constraints,
+     then exec's a separate DROP CONSTRAINT for that constraint.
+    :param mssql_drop_default: Optional boolean.  When ``True``, on
+     Microsoft SQL Server only, first
+     drop the DEFAULT constraint on the column using a
+     SQL-script-compatible
+     block that selects into a @variable from sys.default_constraints,
+     then exec's a separate DROP CONSTRAINT for that default.
+    :param mssql_drop_foreign_key: Optional boolean.  When ``True``, on
+     Microsoft SQL Server only, first
+     drop a single FOREIGN KEY constraint on the column using a
+     SQL-script-compatible
+     block that selects into a @variable from
+     sys.foreign_keys/sys.foreign_key_columns,
+     then exec's a separate DROP CONSTRAINT for that default.  Only
+     works if the column has exactly one FK constraint which refers to
+     it, at the moment.
+
+    """
+
+def drop_constraint(
+    constraint_name: str,
+    table_name: str,
+    type_: Optional[str] = None,
+    *,
+    schema: Optional[str] = None,
+) -> None:
+    r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
+
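+    For example (a sketch; the constraint and table names are
+    hypothetical)::
+
+        from alembic import op
+
+        op.drop_constraint("fk_user_address", "address", type_="foreignkey")
+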
+    :param constraint_name: name of the constraint.
+    :param table_name: table name.
+    :param type\_: optional, required on MySQL.  can be
+     'foreignkey', 'primary', 'unique', or 'check'.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def drop_index(
+    index_name: str,
+    table_name: Optional[str] = None,
+    *,
+    schema: Optional[str] = None,
+    if_exists: Optional[bool] = None,
+    **kw: Any,
+) -> None:
+    r"""Issue a "drop index" instruction using the current
+    migration context.
+
+    e.g.::
+
+        drop_index("accounts")
+
+    :param index_name: name of the index.
+    :param table_name: name of the owning table.  Some
+     backends such as Microsoft SQL Server require this.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    :param if_exists: If True, adds IF EXISTS operator when
+     dropping the index.
+
+     .. versionadded:: 1.12.0
+
+    :param \**kw: Additional keyword arguments not mentioned above are
+     dialect specific, and passed in the form
+     ``<dialectname>_<argname>``.
+     See the documentation regarding an individual dialect at
+     :ref:`dialect_toplevel` for detail on documented arguments.
+
+    """
+
+def drop_table(
+    table_name: str, *, schema: Optional[str] = None, **kw: Any
+) -> None:
+    r"""Issue a "drop table" instruction using the current
+    migration context.
+
+    e.g.::
+
+        drop_table("accounts")
+
+    :param table_name: Name of the table
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+    :param \**kw: Other keyword arguments are passed to the underlying
+     :class:`sqlalchemy.schema.Table` object created for the command.
+
+    """
+
+def drop_table_comment(
+    table_name: str,
+    *,
+    existing_comment: Optional[str] = None,
+    schema: Optional[str] = None,
+) -> None:
+    """Issue a "drop table comment" operation to
+    remove an existing comment set on a table.
+
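+    For example (a sketch; the ``account`` table and its comment are
+    hypothetical)::
+
+        from alembic import op
+
+        op.drop_table_comment("account", existing_comment="user accounts")
+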
+    :param table_name: string name of the target table.
+    :param existing_comment: An optional string value of a comment already
+     registered on the specified table.
+
+    .. seealso::
+
+        :meth:`.Operations.create_table_comment`
+
+        :paramref:`.Operations.alter_column.comment`
+
+    """
+
+def execute(
+    sqltext: Union[Executable, str],
+    *,
+    execution_options: Optional[dict[str, Any]] = None,
+) -> None:
+    r"""Execute the given SQL using the current migration context.
+
+    The given SQL can be a plain string, e.g.::
+
+        op.execute("INSERT INTO table (foo) VALUES ('some value')")
+
+    Or it can be any kind of Core SQL Expression construct, such as
+    below where we use an update construct::
+
+        from sqlalchemy.sql import table, column
+        from sqlalchemy import String
+        from alembic import op
+
+        account = table("account", column("name", String))
+        op.execute(
+            account.update()
+            .where(account.c.name == op.inline_literal("account 1"))
+            .values({"name": op.inline_literal("account 2")})
+        )
+
+    Above, we made use of the SQLAlchemy
+    :func:`sqlalchemy.sql.expression.table` and
+    :func:`sqlalchemy.sql.expression.column` constructs to make a brief,
+    ad-hoc table construct just for our UPDATE statement.  A full
+    :class:`~sqlalchemy.schema.Table` construct of course works perfectly
+    fine as well, though note it's a recommended practice to at least
+    ensure the definition of a table is self-contained within the migration
+    script, rather than imported from a module that may break compatibility
+    with older migrations.
+
+    In a SQL script context, the statement is emitted directly to the
+    output stream.   There is *no* return result, however, as this
+    function is oriented towards generating a change script
+    that can run in "offline" mode.     Additionally, parameterized
+    statements are discouraged here, as they *will not work* in offline
+    mode.  Above, we use :meth:`.inline_literal` where parameters are
+    to be used.
+
+    For full interaction with a connected database where parameters can
+    also be used normally, use the "bind" available from the context::
+
+        from alembic import op
+
+        connection = op.get_bind()
+
+        connection.execute(
+            account.update()
+            .where(account.c.name == "account 1")
+            .values({"name": "account 2"})
+        )
+
+    Additionally, when passing the statement as a plain string, it is first
+    coerced into a :func:`sqlalchemy.sql.expression.text` construct
+    before being passed along.  In the less likely case that the
+    literal SQL string contains a colon, it must be escaped with a
+    backslash, as::
+
+       op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
+
+
+    :param sqltext: Any legal SQLAlchemy expression, including:
+
+    * a string
+    * a :func:`sqlalchemy.sql.expression.text` construct.
+    * a :func:`sqlalchemy.sql.expression.insert` construct.
+    * a :func:`sqlalchemy.sql.expression.update` construct.
+    * a :func:`sqlalchemy.sql.expression.delete` construct.
+    * Any "executable" described in SQLAlchemy Core documentation,
+      noting that no result set is returned.
+
+    .. note::  when passing a plain string, the statement is coerced into
+       a :func:`sqlalchemy.sql.expression.text` construct. This construct
+       considers symbols with colons, e.g. ``:foo`` to be bound parameters.
+       To avoid this, ensure that colon symbols are escaped, e.g.
+       ``\:foo``.
+
+    :param execution_options: Optional dictionary of
+     execution options, will be passed to
+     :meth:`sqlalchemy.engine.Connection.execution_options`.
+    """
+
+def f(name: str) -> conv:
+    """Indicate a string name that has already had a naming convention
+    applied to it.
+
+    This feature combines with the SQLAlchemy ``naming_convention`` feature
+    to disambiguate constraint names that have already had naming
+    conventions applied to them, versus those that have not.  This is
+    necessary in the case that the ``"%(constraint_name)s"`` token
+    is used within a naming convention, so that it can be identified
+    that this particular name should remain fixed.
+
+    If the :meth:`.Operations.f` is used on a constraint, the naming
+    convention will not take effect::
+
+        op.add_column("t", "x", Boolean(name=op.f("ck_bool_t_x")))
+
+    Above, the CHECK constraint generated will have the name
+    ``ck_bool_t_x`` regardless of whether or not a naming convention is
+    in use.
+
+    Alternatively, if a naming convention is in use, and 'f' is not used,
+    names will be converted along conventions.  If the ``target_metadata``
+    contains the naming convention
+    ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the
+    output of the following::
+
+        op.add_column("t", Column("x", Boolean(name="x")))
+
+    will be::
+
+        CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))
+
+    The function is rendered in the output of autogenerate when
+    a particular constraint name is already converted.
+
+    """
+
+def get_bind() -> Connection:
+    """Return the current 'bind'.
+
+    Under normal circumstances, this is the
+    :class:`~sqlalchemy.engine.Connection` currently being used
+    to emit SQL to the database.
+
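+    For example, to run a parameterized statement against the live
+    database (a sketch; the SQL is illustrative only)::
+
+        import sqlalchemy as sa
+
+        connection = op.get_bind()
+        connection.execute(sa.text("SELECT 1"))
+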
+    In a SQL script ("offline") context, there is no database
+    connection and this value is ``None``.
+
+    """
+
+def get_context() -> MigrationContext:
+    """Return the :class:`.MigrationContext` object that's
+    currently in use.
+
+    """
+
+def implementation_for(op_cls: Any) -> Callable[[_C], _C]:
+    """Register an implementation for a given :class:`.MigrateOperation`.
+
+    This is part of the operation extensibility API.
+
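+    For example, providing the implementation for a custom operation
+    registered via :meth:`.Operations.register_operation` (a sketch;
+    ``CreateSequenceOp`` is a hypothetical operation class)::
+
+        from alembic.operations import Operations
+
+        @Operations.implementation_for(CreateSequenceOp)
+        def create_sequence(operations, operation):
+            operations.execute(
+                "CREATE SEQUENCE %s" % operation.sequence_name
+            )
+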
+    .. seealso::
+
+        :ref:`operation_plugins` - example of use
+
+    """
+
+def inline_literal(
+    value: Union[str, int], type_: Optional[TypeEngine[Any]] = None
+) -> _literal_bindparam:
+    r"""Produce an 'inline literal' expression, suitable for
+    using in an INSERT, UPDATE, or DELETE statement.
+
+    When using Alembic in "offline" mode, CRUD operations
+    aren't compatible with SQLAlchemy's default behavior surrounding
+    literal values,
+    which is that they are converted into bound values and passed
+    separately into the ``execute()`` method of the DBAPI cursor.
+    An offline SQL
+    script needs to have these rendered inline.  While it should
+    always be noted that inline literal values are an **enormous**
+    security hole in an application that handles untrusted input,
+    a schema migration is not run in this context, so
+    literals are safe to render inline, with the caveat that
+    advanced types like dates may not be supported directly
+    by SQLAlchemy.
+
+    See :meth:`.Operations.execute` for an example usage of
+    :meth:`.Operations.inline_literal`.
+
+    The environment can also be configured to attempt to render
+    "literal" values inline automatically, for those simple types
+    that are supported by the dialect; see
+    :paramref:`.EnvironmentContext.configure.literal_binds` for this
+    more recently added feature.
+
+    :param value: The value to render.  Strings, integers, and simple
+     numerics should be supported.   Other types like boolean,
+     dates, etc. may or may not be supported yet by various
+     backends.
+    :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine`
+     subclass stating the type of this value.  In SQLAlchemy
+     expressions, this is usually derived automatically
+     from the Python type of the value itself, as well as
+     based on the context in which the value is used.
+
+    .. seealso::
+
+        :paramref:`.EnvironmentContext.configure.literal_binds`
+
+    """
+
+@overload
+def invoke(operation: CreateTableOp) -> Table: ...
+@overload
+def invoke(
+    operation: Union[
+        AddConstraintOp,
+        DropConstraintOp,
+        CreateIndexOp,
+        DropIndexOp,
+        AddColumnOp,
+        AlterColumnOp,
+        AlterTableOp,
+        CreateTableCommentOp,
+        DropTableCommentOp,
+        DropColumnOp,
+        BulkInsertOp,
+        DropTableOp,
+        ExecuteSQLOp,
+    ]
+) -> None: ...
+@overload
+def invoke(operation: MigrateOperation) -> Any:
+    """Given a :class:`.MigrateOperation`, invoke it in terms of
+    this :class:`.Operations` instance.
+
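+    For example (a sketch; the ``account`` table is hypothetical)::
+
+        from alembic.operations import ops
+
+        op.invoke(ops.DropTableOp("account"))
+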
+    """
+
+def register_operation(
+    name: str, sourcename: Optional[str] = None
+) -> Callable[[Type[_T]], Type[_T]]:
+    """Register a new operation for this class.
+
+    This method is normally used to add new operations
+    to the :class:`.Operations` class, and possibly the
+    :class:`.BatchOperations` class as well.   All Alembic migration
+    operations are implemented via this system, however the system
+    is also available as a public API to facilitate adding custom
+    operations.
+
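+    For example, a custom operation might be registered roughly as
+    follows (a sketch; ``CreateSequenceOp`` and its arguments are
+    hypothetical)::
+
+        from alembic.operations import Operations, MigrateOperation
+
+        @Operations.register_operation("create_sequence")
+        class CreateSequenceOp(MigrateOperation):
+            def __init__(self, sequence_name, schema=None):
+                self.sequence_name = sequence_name
+                self.schema = schema
+
+            @classmethod
+            def create_sequence(cls, operations, sequence_name, **kw):
+                op = CreateSequenceOp(sequence_name, **kw)
+                return operations.invoke(op)
+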
+    .. seealso::
+
+        :ref:`operation_plugins`
+
+
+    """
+
+def rename_table(
+    old_table_name: str, new_table_name: str, *, schema: Optional[str] = None
+) -> None:
+    """Emit an ALTER TABLE to rename a table.
+
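+    For example (a sketch; the table names are hypothetical)::
+
+        from alembic import op
+
+        op.rename_table("account", "accounts")
+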
+    :param old_table_name: old name.
+    :param new_table_name: new name.
+    :param schema: Optional schema name to operate within.  To control
+     quoting of the schema outside of the default behavior, use
+     the SQLAlchemy construct
+     :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+    """
+
+def run_async(
+    async_function: Callable[..., Awaitable[_T]], *args: Any, **kw_args: Any
+) -> _T:
+    """Invoke the given asynchronous callable, passing an asynchronous
+    :class:`~sqlalchemy.ext.asyncio.AsyncConnection` as the first
+    argument.
+
+    This method allows calling async functions from within the
+    synchronous ``upgrade()`` or ``downgrade()`` alembic migration
+    method.
+
+    The async connection passed to the callable shares the same
+    transaction as the connection running in the migration context.
+
+    Any additional positional or keyword arguments passed to this
+    function are passed through to the provided async function.
+
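+    For example (a sketch; the helper coroutine and the ``t`` table are
+    hypothetical)::
+
+        import sqlalchemy as sa
+
+        async def seed_rows(connection, label):
+            await connection.execute(
+                sa.text("INSERT INTO t (label) VALUES (:label)"),
+                {"label": label},
+            )
+
+        def upgrade():
+            op.run_async(seed_rows, "migrated")
+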
+    .. versionadded:: 1.11
+
+    .. note::
+
+        This method can be called only when alembic is called using
+        an async dialect.
+    """
diff --git a/venv/Lib/site-packages/alembic/operations/__init__.py b/venv/Lib/site-packages/alembic/operations/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..26197cbe8205decca224757d329e634a6a23d2e2
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/operations/__init__.py
@@ -0,0 +1,15 @@
+from . import toimpl
+from .base import AbstractOperations
+from .base import BatchOperations
+from .base import Operations
+from .ops import MigrateOperation
+from .ops import MigrationScript
+
+
+__all__ = [
+    "AbstractOperations",
+    "Operations",
+    "BatchOperations",
+    "MigrateOperation",
+    "MigrationScript",
+]
diff --git a/venv/Lib/site-packages/alembic/operations/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/operations/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e62c395b262e17ffc96f5f54696a00f9adaf915
Binary files /dev/null and b/venv/Lib/site-packages/alembic/operations/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/operations/__pycache__/base.cpython-311.pyc b/venv/Lib/site-packages/alembic/operations/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..037faee092a35cbf4b0e4daa6e52717197157545
Binary files /dev/null and b/venv/Lib/site-packages/alembic/operations/__pycache__/base.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/operations/__pycache__/batch.cpython-311.pyc b/venv/Lib/site-packages/alembic/operations/__pycache__/batch.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb2da30002a71ce4c43752ed7f4560dccab7540a
Binary files /dev/null and b/venv/Lib/site-packages/alembic/operations/__pycache__/batch.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/operations/__pycache__/ops.cpython-311.pyc b/venv/Lib/site-packages/alembic/operations/__pycache__/ops.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d08ff9bc22515c7d694791c09dec70d1dea2ca96
Binary files /dev/null and b/venv/Lib/site-packages/alembic/operations/__pycache__/ops.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/operations/__pycache__/schemaobj.cpython-311.pyc b/venv/Lib/site-packages/alembic/operations/__pycache__/schemaobj.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c0fd7b145b0240c87275a5a0e448a9d87262d6f
Binary files /dev/null and b/venv/Lib/site-packages/alembic/operations/__pycache__/schemaobj.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/operations/__pycache__/toimpl.cpython-311.pyc b/venv/Lib/site-packages/alembic/operations/__pycache__/toimpl.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37a37685e9dd806d437901cc3bd380d9bec9c370
Binary files /dev/null and b/venv/Lib/site-packages/alembic/operations/__pycache__/toimpl.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/operations/base.py b/venv/Lib/site-packages/alembic/operations/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..bafe441a69ceb2bcd13f5f3f3fad1382b589e99f
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/operations/base.py
@@ -0,0 +1,1893 @@
+# mypy: allow-untyped-calls
+
+from __future__ import annotations
+
+from contextlib import contextmanager
+import re
+import textwrap
+from typing import Any
+from typing import Awaitable
+from typing import Callable
+from typing import Dict
+from typing import Iterator
+from typing import List  # noqa
+from typing import Mapping
+from typing import NoReturn
+from typing import Optional
+from typing import overload
+from typing import Sequence  # noqa
+from typing import Tuple
+from typing import Type  # noqa
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy.sql.elements import conv
+
+from . import batch
+from . import schemaobj
+from .. import util
+from ..util import sqla_compat
+from ..util.compat import formatannotation_fwdref
+from ..util.compat import inspect_formatargspec
+from ..util.compat import inspect_getfullargspec
+from ..util.sqla_compat import _literal_bindparam
+
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy import Table
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.expression import ColumnElement
+    from sqlalchemy.sql.expression import TableClause
+    from sqlalchemy.sql.expression import TextClause
+    from sqlalchemy.sql.functions import Function
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Computed
+    from sqlalchemy.sql.schema import Identity
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.types import TypeEngine
+
+    from .batch import BatchOperationsImpl
+    from .ops import AddColumnOp
+    from .ops import AddConstraintOp
+    from .ops import AlterColumnOp
+    from .ops import AlterTableOp
+    from .ops import BulkInsertOp
+    from .ops import CreateIndexOp
+    from .ops import CreateTableCommentOp
+    from .ops import CreateTableOp
+    from .ops import DropColumnOp
+    from .ops import DropConstraintOp
+    from .ops import DropIndexOp
+    from .ops import DropTableCommentOp
+    from .ops import DropTableOp
+    from .ops import ExecuteSQLOp
+    from .ops import MigrateOperation
+    from ..ddl import DefaultImpl
+    from ..runtime.migration import MigrationContext
+__all__ = ("Operations", "BatchOperations")
+_T = TypeVar("_T")
+
+_C = TypeVar("_C", bound=Callable[..., Any])
+
+
+class AbstractOperations(util.ModuleClsProxy):
+    """Base class for Operations and BatchOperations.
+
+    .. versionadded:: 1.11.0
+
+    """
+
+    impl: Union[DefaultImpl, BatchOperationsImpl]
+    _to_impl = util.Dispatcher()
+
+    def __init__(
+        self,
+        migration_context: MigrationContext,
+        impl: Optional[BatchOperationsImpl] = None,
+    ) -> None:
+        """Construct a new :class:`.Operations`
+
+        :param migration_context: a :class:`.MigrationContext`
+         instance.
+
+        """
+        self.migration_context = migration_context
+        if impl is None:
+            self.impl = migration_context.impl
+        else:
+            self.impl = impl
+
+        self.schema_obj = schemaobj.SchemaObjects(migration_context)
+
+    @classmethod
+    def register_operation(
+        cls, name: str, sourcename: Optional[str] = None
+    ) -> Callable[[Type[_T]], Type[_T]]:
+        """Register a new operation for this class.
+
+        This method is normally used to add new operations
+        to the :class:`.Operations` class, and possibly the
+        :class:`.BatchOperations` class as well.   All Alembic migration
+        operations are implemented via this system, however the system
+        is also available as a public API to facilitate adding custom
+        operations.
+
+        .. seealso::
+
+            :ref:`operation_plugins`
+
+
+        """
+
+        def register(op_cls: Type[_T]) -> Type[_T]:
+            if sourcename is None:
+                fn = getattr(op_cls, name)
+                source_name = fn.__name__
+            else:
+                fn = getattr(op_cls, sourcename)
+                source_name = fn.__name__
+
+            spec = inspect_getfullargspec(fn)
+
+            name_args = spec[0]
+            assert name_args[0:2] == ["cls", "operations"]
+
+            name_args[0:2] = ["self"]
+
+            args = inspect_formatargspec(
+                *spec, formatannotation=formatannotation_fwdref
+            )
+            num_defaults = len(spec[3]) if spec[3] else 0
+
+            defaulted_vals: Tuple[Any, ...]
+
+            if num_defaults:
+                defaulted_vals = tuple(name_args[0 - num_defaults :])
+            else:
+                defaulted_vals = ()
+
+            defaulted_vals += tuple(spec[4])
+            # here, we are using formatargspec in a different way in order
+            # to get a string that will re-apply incoming arguments to a new
+            # function call
+
+            apply_kw = inspect_formatargspec(
+                name_args + spec[4],
+                spec[1],
+                spec[2],
+                defaulted_vals,
+                formatvalue=lambda x: "=" + x,
+                formatannotation=formatannotation_fwdref,
+            )
+
+            args = re.sub(
+                r'[_]?ForwardRef\(([\'"].+?[\'"])\)',
+                lambda m: m.group(1),
+                args,
+            )
+
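+            # Render source for a proxy method whose signature mirrors
+            # the operation's class-level method, re-applying the incoming
+            # arguments to a call against ``op_cls``.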
+            func_text = textwrap.dedent(
+                """\
+            def %(name)s%(args)s:
+                %(doc)r
+                return op_cls.%(source_name)s%(apply_kw)s
+            """
+                % {
+                    "name": name,
+                    "source_name": source_name,
+                    "args": args,
+                    "apply_kw": apply_kw,
+                    "doc": fn.__doc__,
+                }
+            )
+
+            globals_ = dict(globals())
+            globals_.update({"op_cls": op_cls})
+            lcl: Dict[str, Any] = {}
+
+            exec(func_text, globals_, lcl)
+            setattr(cls, name, lcl[name])
+            fn.__func__.__doc__ = (
+                "This method is proxied on "
+                "the :class:`.%s` class, via the :meth:`.%s.%s` method."
+                % (cls.__name__, cls.__name__, name)
+            )
+            if hasattr(fn, "_legacy_translations"):
+                lcl[name]._legacy_translations = fn._legacy_translations
+            return op_cls
+
+        return register
+
+    @classmethod
+    def implementation_for(cls, op_cls: Any) -> Callable[[_C], _C]:
+        """Register an implementation for a given :class:`.MigrateOperation`.
+
+        This is part of the operation extensibility API.
+
+        .. seealso::
+
+            :ref:`operation_plugins` - example of use
+
+        """
+
+        def decorate(fn: _C) -> _C:
+            cls._to_impl.dispatch_for(op_cls)(fn)
+            return fn
+
+        return decorate
+
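+    # Context manager that installs this Operations instance as the
+    # module-level ``alembic.op`` proxy for the duration of the block.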
+    @classmethod
+    @contextmanager
+    def context(
+        cls, migration_context: MigrationContext
+    ) -> Iterator[Operations]:
+        op = Operations(migration_context)
+        op._install_proxy()
+        yield op
+        op._remove_proxy()
+
+    @contextmanager
+    def batch_alter_table(
+        self,
+        table_name: str,
+        schema: Optional[str] = None,
+        recreate: Literal["auto", "always", "never"] = "auto",
+        partial_reordering: Optional[Tuple[Any, ...]] = None,
+        copy_from: Optional[Table] = None,
+        table_args: Tuple[Any, ...] = (),
+        table_kwargs: Mapping[str, Any] = util.immutabledict(),
+        reflect_args: Tuple[Any, ...] = (),
+        reflect_kwargs: Mapping[str, Any] = util.immutabledict(),
+        naming_convention: Optional[Dict[str, str]] = None,
+    ) -> Iterator[BatchOperations]:
+        """Invoke a series of per-table migrations in batch.
+
+        Batch mode allows a series of operations specific to a table
+        to be syntactically grouped together, and allows for alternate
+        modes of table migration, in particular the "recreate" style of
+        migration required by SQLite.
+
+        "recreate" style is as follows:
+
+        1. A new table is created with the new specification, based on the
+           migration directives within the batch, using a temporary name.
+
+        2. The data is copied from the existing table to the new table.
+
+        3. The existing table is dropped.
+
+        4. The new table is renamed to the existing table name.
+
+        The directive by default will only use "recreate" style on the
+        SQLite backend, and only if directives are present which require
+        this form, e.g. anything other than ``add_column()``.   The batch
+        operation on other backends will proceed using standard ALTER TABLE
+        operations.
+
+        The method is used as a context manager, which returns an instance
+        of :class:`.BatchOperations`; this object is the same as
+        :class:`.Operations` except that table names and schema names
+        are omitted.  E.g.::
+
+            with op.batch_alter_table("some_table") as batch_op:
+                batch_op.add_column(Column("foo", Integer))
+                batch_op.drop_column("bar")
+
+        The operations within the context manager are invoked at once
+        when the context is ended.   When run against SQLite, if the
+        migrations include operations not supported by SQLite's ALTER TABLE,
+        the entire table will be copied to a new one with the new
+        specification, moving all data across as well.
+
+        The copy operation by default uses reflection to retrieve the current
+        structure of the table, and therefore :meth:`.batch_alter_table`
+        in this mode requires that the migration is run in "online" mode.
+        The ``copy_from`` parameter may be passed which refers to an existing
+        :class:`.Table` object, which will bypass this reflection step.
+
+        .. note::  The table copy operation will currently not copy
+           CHECK constraints, and may not copy UNIQUE constraints that are
+           unnamed, as is possible on SQLite.   See the section
+           :ref:`sqlite_batch_constraints` for workarounds.
+
+        :param table_name: name of table
+        :param schema: optional schema name.
+        :param recreate: under what circumstances the table should be
+         recreated. At its default of ``"auto"``, the SQLite dialect will
+         recreate the table if any operations other than ``add_column()``,
+         ``create_index()``, or ``drop_index()`` are
+         present. Other options include ``"always"`` and ``"never"``.
+        :param copy_from: optional :class:`~sqlalchemy.schema.Table` object
+         that will act as the structure of the table being copied.  If omitted,
+         table reflection is used to retrieve the structure of the table.
+
+         .. seealso::
+
+            :ref:`batch_offline_mode`
+
+            :paramref:`~.Operations.batch_alter_table.reflect_args`
+
+            :paramref:`~.Operations.batch_alter_table.reflect_kwargs`
+
+        :param reflect_args: a sequence of additional positional arguments that
+         will be applied to the table structure being reflected / copied;
+         this may be used to pass column and constraint overrides to the
+         table that will be reflected, in lieu of passing the whole
+         :class:`~sqlalchemy.schema.Table` using
+         :paramref:`~.Operations.batch_alter_table.copy_from`.
+        :param reflect_kwargs: a dictionary of additional keyword arguments
+         that will be applied to the table structure being copied; this may be
+         used to pass additional table and reflection options to the table that
+         will be reflected, in lieu of passing the whole
+         :class:`~sqlalchemy.schema.Table` using
+         :paramref:`~.Operations.batch_alter_table.copy_from`.
+        :param table_args: a sequence of additional positional arguments that
+         will be applied to the new :class:`~sqlalchemy.schema.Table` when
+         created, in addition to those copied from the source table.
+         This may be used to provide additional constraints such as CHECK
+         constraints that may not be reflected.
+        :param table_kwargs: a dictionary of additional keyword arguments
+         that will be applied to the new :class:`~sqlalchemy.schema.Table`
+         when created, in addition to those copied from the source table.
+         This may be used to provide for additional table options that may
+         not be reflected.
+        :param naming_convention: a naming convention dictionary of the form
+         described at :ref:`autogen_naming_conventions` which will be applied
+         to the :class:`~sqlalchemy.schema.MetaData` during the reflection
+         process.  This is typically required if one wants to drop SQLite
+         constraints, as these constraints will not have names when
+         reflected on this backend.  Requires SQLAlchemy **0.9.4** or greater.
+
+         .. seealso::
+
+            :ref:`dropping_sqlite_foreign_keys`
+
+        :param partial_reordering: a list of tuples, each suggesting a desired
+         ordering of two or more columns in the newly created table.  Requires
+         that :paramref:`.batch_alter_table.recreate` is set to ``"always"``.
+         Examples, given a table with columns "a", "b", "c", and "d":
+
+         Specify the order of all columns::
+
+            with op.batch_alter_table(
+                "some_table",
+                recreate="always",
+                partial_reordering=[("c", "d", "a", "b")],
+            ) as batch_op:
+                pass
+
+         Ensure "d" appears before "c", and "b", appears before "a"::
+
+            with op.batch_alter_table(
+                "some_table",
+                recreate="always",
+                partial_reordering=[("d", "c"), ("b", "a")],
+            ) as batch_op:
+                pass
+
+         The ordering of columns not included in the partial_reordering
+         set is undefined.   For predictable results, it is best to
+         specify the complete ordering of all columns.
+
+        .. note:: batch mode requires SQLAlchemy 0.8 or above.
+
+        .. seealso::
+
+            :ref:`batch_migrations`
+
+        """
+        impl = batch.BatchOperationsImpl(
+            self,
+            table_name,
+            schema,
+            recreate,
+            copy_from,
+            table_args,
+            table_kwargs,
+            reflect_args,
+            reflect_kwargs,
+            naming_convention,
+            partial_reordering,
+        )
+        batch_op = BatchOperations(self.migration_context, impl=impl)
+        yield batch_op
+        impl.flush()
+
+    def get_context(self) -> MigrationContext:
+        """Return the :class:`.MigrationContext` object that's
+        currently in use.
+
+        """
+
+        return self.migration_context
+
+    @overload
+    def invoke(self, operation: CreateTableOp) -> Table:
+        ...
+
+    @overload
+    def invoke(
+        self,
+        operation: Union[
+            AddConstraintOp,
+            DropConstraintOp,
+            CreateIndexOp,
+            DropIndexOp,
+            AddColumnOp,
+            AlterColumnOp,
+            AlterTableOp,
+            CreateTableCommentOp,
+            DropTableCommentOp,
+            DropColumnOp,
+            BulkInsertOp,
+            DropTableOp,
+            ExecuteSQLOp,
+        ],
+    ) -> None:
+        ...
+
+    @overload
+    def invoke(self, operation: MigrateOperation) -> Any:
+        ...
+
+    def invoke(self, operation: MigrateOperation) -> Any:
+        """Given a :class:`.MigrateOperation`, invoke it in terms of
+        this :class:`.Operations` instance.
+
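+        For example, a brief sketch dispatching an ``ops.AddColumnOp``
+        directly (the table and column names here are illustrative)::
+
+            from alembic.operations import ops
+            from sqlalchemy import Column, Integer
+
+            # "account" / "x" are hypothetical names
+            operation = ops.AddColumnOp("account", Column("x", Integer))
+            op.invoke(operation)
+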
+        """
+        fn = self._to_impl.dispatch(
+            operation, self.migration_context.impl.__dialect__
+        )
+        return fn(self, operation)
+
+    def f(self, name: str) -> conv:
+        """Indicate a string name that has already had a naming convention
+        applied to it.
+
+        This feature combines with the SQLAlchemy ``naming_convention`` feature
+        to disambiguate constraint names that have already had naming
+        conventions applied to them, versus those that have not.  This is
+        necessary in the case that the ``"%(constraint_name)s"`` token
+        is used within a naming convention, so that Alembic can identify
+        that this particular name should remain fixed.
+
+        If the :meth:`.Operations.f` is used on a constraint, the naming
+        convention will not take effect::
+
+            op.add_column("t", "x", Boolean(name=op.f("ck_bool_t_x")))
+
+        Above, the CHECK constraint generated will have the name
+        ``ck_bool_t_x`` regardless of whether or not a naming convention is
+        in use.
+
+        Alternatively, if a naming convention is in use, and 'f' is not used,
+        names will be converted according to the convention.  If the
+        ``target_metadata`` contains the naming convention
+        ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the
+        output of the following::
+
+            op.add_column("t", Column("x", Boolean(name="x")))
+
+        will be::
+
+            CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))
+
+        The function is rendered in the output of autogenerate when
+        a particular constraint name is already converted.
+
+        """
+        return conv(name)
+
+    def inline_literal(
+        self, value: Union[str, int], type_: Optional[TypeEngine[Any]] = None
+    ) -> _literal_bindparam:
+        r"""Produce an 'inline literal' expression, suitable for
+        using in an INSERT, UPDATE, or DELETE statement.
+
+        When using Alembic in "offline" mode, CRUD operations
+        aren't compatible with SQLAlchemy's default behavior surrounding
+        literal values,
+        which is that they are converted into bound values and passed
+        separately into the ``execute()`` method of the DBAPI cursor.
+        An offline SQL
+        script needs to have these rendered inline.  While inline literal
+        values are an **enormous** security hole in an application that
+        handles untrusted input, a schema migration is not run in this
+        context, so
+        literals are safe to render inline, with the caveat that
+        advanced types like dates may not be supported directly
+        by SQLAlchemy.
+
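+        For example, a minimal sketch rendering an integer literal inline
+        (the ``account`` table here is a hypothetical illustration)::
+
+            from alembic import op
+            from sqlalchemy import Integer
+            from sqlalchemy.sql import column, table
+
+            # ad-hoc table construct; names are illustrative
+            account = table("account", column("flags", Integer))
+            op.execute(
+                account.update().values({"flags": op.inline_literal(0)})
+            )
+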
+        See :meth:`.Operations.execute` for an example usage of
+        :meth:`.Operations.inline_literal`.
+
+        The environment can also be configured to attempt to render
+        "literal" values inline automatically, for those simple types
+        that are supported by the dialect; see
+        :paramref:`.EnvironmentContext.configure.literal_binds` for this
+        feature.
+
+        :param value: The value to render.  Strings, integers, and simple
+         numerics should be supported.   Other types like boolean,
+         dates, etc. may or may not be supported yet by various
+         backends.
+        :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine`
+         subclass stating the type of this value.  In SQLAlchemy
+         expressions, this is usually derived automatically
+         from the Python type of the value itself, as well as
+         based on the context in which the value is used.
+
+        .. seealso::
+
+            :paramref:`.EnvironmentContext.configure.literal_binds`
+
+        """
+        return sqla_compat._literal_bindparam(None, value, type_=type_)
+
+    def get_bind(self) -> Connection:
+        """Return the current 'bind'.
+
+        Under normal circumstances, this is the
+        :class:`~sqlalchemy.engine.Connection` currently being used
+        to emit SQL to the database.
+
+        In a SQL script ("offline") context, this value is ``None``.
+
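+        E.g., a short illustrative sketch for the "online" case::
+
+            from sqlalchemy import text
+
+            connection = op.get_bind()
+            connection.execute(text("SELECT 1"))
+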
+        """
+        return self.migration_context.impl.bind  # type: ignore[return-value]
+
+    def run_async(
+        self,
+        async_function: Callable[..., Awaitable[_T]],
+        *args: Any,
+        **kw_args: Any,
+    ) -> _T:
+        """Invoke the given asynchronous callable, passing an asynchronous
+        :class:`~sqlalchemy.ext.asyncio.AsyncConnection` as the first
+        argument.
+
+        This method allows calling async functions from within the
+        synchronous ``upgrade()`` or ``downgrade()`` alembic migration
+        method.
+
+        The async connection passed to the callable shares the same
+        transaction as the connection running in the migration context.
+
+        Any additional positional or keyword arguments passed to this
+        function are passed through to the provided async function.
+
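+        For example, a minimal sketch (the ``seed_defaults`` coroutine and
+        the ``account`` table are hypothetical)::
+
+            from sqlalchemy import text
+            from sqlalchemy.ext.asyncio import AsyncConnection
+
+            # hypothetical coroutine run inside the migration transaction
+            async def seed_defaults(connection: AsyncConnection) -> None:
+                await connection.execute(text("UPDATE account SET active = 1"))
+
+            def upgrade() -> None:
+                op.run_async(seed_defaults)
+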
+        .. versionadded:: 1.11
+
+        .. note::
+
+            This method can be called only when alembic is called using
+            an async dialect.
+        """
+        if not sqla_compat.sqla_14_18:
+            raise NotImplementedError("SQLAlchemy 1.4.18+ required")
+        sync_conn = self.get_bind()
+        if sync_conn is None:
+            raise NotImplementedError("Cannot call run_async in SQL mode")
+        if not sync_conn.dialect.is_async:
+            raise ValueError("Cannot call run_async with a sync engine")
+        from sqlalchemy.ext.asyncio import AsyncConnection
+        from sqlalchemy.util import await_only
+
+        async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn)
+        return await_only(async_function(async_conn, *args, **kw_args))
+
+
+class Operations(AbstractOperations):
+    """Define high level migration operations.
+
+    Each operation corresponds to some schema migration operation,
+    executed against a particular :class:`.MigrationContext`
+    which in turn represents connectivity to a database,
+    or a file output stream.
+
+    While :class:`.Operations` is normally configured as
+    part of the :meth:`.EnvironmentContext.run_migrations`
+    method called from an ``env.py`` script, a standalone
+    :class:`.Operations` instance can be
+    made for use cases external to regular Alembic
+    migrations by passing in a :class:`.MigrationContext`::
+
+        from alembic.migration import MigrationContext
+        from alembic.operations import Operations
+
+        conn = myengine.connect()
+        ctx = MigrationContext.configure(conn)
+        op = Operations(ctx)
+
+        op.alter_column("t", "c", nullable=True)
+
+    Note that as of 0.8, most of the methods on this class are produced
+    dynamically using the :meth:`.Operations.register_operation`
+    method.
+
+    """
+
+    if TYPE_CHECKING:
+        # START STUB FUNCTIONS: op_cls
+        # ### the following stubs are generated by tools/write_pyi.py ###
+        # ### do not edit ###
+
+        def add_column(
+            self,
+            table_name: str,
+            column: Column[Any],
+            *,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Issue an "add column" instruction using the current
+            migration context.
+
+            e.g.::
+
+                from alembic import op
+                from sqlalchemy import Column, String
+
+                op.add_column("organization", Column("name", String()))
+
+            The :meth:`.Operations.add_column` method typically corresponds
+            to the SQL command "ALTER TABLE... ADD COLUMN".    Within the scope
+            of this command, the column's name, datatype, nullability,
+            and optional server-generated defaults may be indicated.
+
+            .. note::
+
+                With the exception of NOT NULL constraints or single-column FOREIGN
+                KEY constraints, other kinds of constraints such as PRIMARY KEY,
+                UNIQUE or CHECK constraints **cannot** be generated using this
+                method; for these constraints, refer to operations such as
+                :meth:`.Operations.create_primary_key` and
+                :meth:`.Operations.create_check_constraint`. In particular, the
+                following :class:`~sqlalchemy.schema.Column` parameters are
+                **ignored**:
+
+                * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
+                  typically do not support an ALTER operation that can add
+                  individual columns one at a time to an existing primary key
+                  constraint, therefore it's less ambiguous to use the
+                  :meth:`.Operations.create_primary_key` method, which assumes no
+                  existing primary key constraint is present.
+                * :paramref:`~sqlalchemy.schema.Column.unique` - use the
+                  :meth:`.Operations.create_unique_constraint` method
+                * :paramref:`~sqlalchemy.schema.Column.index` - use the
+                  :meth:`.Operations.create_index` method
+
+
+            The provided :class:`~sqlalchemy.schema.Column` object may include a
+            :class:`~sqlalchemy.schema.ForeignKey` constraint directive,
+            referencing a remote table name. For this specific type of constraint,
+            Alembic will automatically emit a second ALTER statement in order to
+            add the single-column FOREIGN KEY constraint separately::
+
+                from alembic import op
+                from sqlalchemy import Column, INTEGER, ForeignKey
+
+                op.add_column(
+                    "organization",
+                    Column("account_id", INTEGER, ForeignKey("accounts.id")),
+                )
+
+            The column argument passed to :meth:`.Operations.add_column` is a
+            :class:`~sqlalchemy.schema.Column` construct, used in the same way it's
+            used in SQLAlchemy. In particular, values or functions to be indicated
+            as producing the column's default value on the database side are
+            specified using the ``server_default`` parameter, and not
+            ``default``, which only specifies Python-side defaults::
+
+                from alembic import op
+                from sqlalchemy import Column, TIMESTAMP, func
+
+                # specify "DEFAULT NOW" along with the column add
+                op.add_column(
+                    "account",
+                    Column("timestamp", TIMESTAMP, server_default=func.now()),
+                )
+
+            :param table_name: String name of the parent table.
+            :param column: a :class:`sqlalchemy.schema.Column` object
+             representing the new column.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def alter_column(
+            self,
+            table_name: str,
+            column_name: str,
+            *,
+            nullable: Optional[bool] = None,
+            comment: Union[str, Literal[False], None] = False,
+            server_default: Any = False,
+            new_column_name: Optional[str] = None,
+            type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
+            existing_type: Union[
+                TypeEngine[Any], Type[TypeEngine[Any]], None
+            ] = None,
+            existing_server_default: Union[
+                str, bool, Identity, Computed, None
+            ] = False,
+            existing_nullable: Optional[bool] = None,
+            existing_comment: Optional[str] = None,
+            schema: Optional[str] = None,
+            **kw: Any,
+        ) -> None:
+            r"""Issue an "alter column" instruction using the
+            current migration context.
+
+            Generally, only that aspect of the column which
+            is being changed, i.e. name, type, nullability,
+            default, needs to be specified.  Multiple changes
+            can also be specified at once and the backend should
+            "do the right thing", emitting each change either
+            separately or together as the backend allows.
+
+            MySQL has special requirements here, since MySQL
+            cannot ALTER a column without a full specification.
+            When producing MySQL-compatible migration files,
+            it is recommended that the ``existing_type``,
+            ``existing_server_default``, and ``existing_nullable``
+            parameters be present, if not being altered.
+
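+            For example, an illustrative sketch of a rename combined with
+            a type change (table and column names here are hypothetical)::
+
+                from alembic import op
+                from sqlalchemy import String
+
+                # "user" / "nickname" are hypothetical names
+                op.alter_column(
+                    "user",
+                    "nickname",
+                    new_column_name="handle",
+                    type_=String(64),
+                    existing_type=String(32),
+                    existing_nullable=True,
+                )
+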
+            Type changes which are against the SQLAlchemy
+            "schema" types :class:`~sqlalchemy.types.Boolean`
+            and  :class:`~sqlalchemy.types.Enum` may also
+            add or drop constraints which accompany those
+            types on backends that don't support them natively.
+            The ``existing_type`` argument is
+            used in this case to identify and remove a previous
+            constraint that was bound to the type object.
+
+            :param table_name: string name of the target table.
+            :param column_name: string name of the target column,
+             as it exists before the operation begins.
+            :param nullable: Optional; specify ``True`` or ``False``
+             to alter the column's nullability.
+            :param server_default: Optional; specify a string
+             SQL expression, :func:`~sqlalchemy.sql.expression.text`,
+             or :class:`~sqlalchemy.schema.DefaultClause` to indicate
+             an alteration to the column's default value.
+             Set to ``None`` to have the default removed.
+            :param comment: optional string text of a new comment to add to the
+             column.
+            :param new_column_name: Optional; specify a string name here to
+             indicate the new name within a column rename operation.
+            :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
+             type object to specify a change to the column's type.
+             For SQLAlchemy types that also indicate a constraint (i.e.
+             :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
+             the constraint is also generated.
+            :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
+             currently understood by the MySQL dialect.
+            :param existing_type: Optional; a
+             :class:`~sqlalchemy.types.TypeEngine`
+             type object to specify the previous type.   This
+             is required for all MySQL column alter operations that
+             don't otherwise specify a new type, as well as for
+             when nullability is being changed on a SQL Server
+             column.  It is also used if the type is a so-called
+             SQLAlchemy "schema" type which may define a constraint (i.e.
+             :class:`~sqlalchemy.types.Boolean`,
+             :class:`~sqlalchemy.types.Enum`),
+             so that the constraint can be dropped.
+            :param existing_server_default: Optional; The existing
+             default value of the column.   Required on MySQL if
+             an existing default is not being changed; else MySQL
+             removes the default.
+            :param existing_nullable: Optional; the existing nullability
+             of the column.  Required on MySQL if the existing nullability
+             is not being changed; else MySQL sets this to NULL.
+            :param existing_autoincrement: Optional; the existing autoincrement
+             of the column.  Used for MySQL's system of altering a column
+             that specifies ``AUTO_INCREMENT``.
+            :param existing_comment: string text of the existing comment on the
+             column to be maintained.  Required on MySQL if the existing comment
+             on the column is not being changed.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param postgresql_using: String argument which will indicate a
+             SQL expression to render within the Postgresql-specific USING clause
+             within ALTER COLUMN.    This string is taken directly as raw SQL which
+             must explicitly include any necessary quoting or escaping of tokens
+             within the expression.
+
+            """  # noqa: E501
+            ...
+
+        def bulk_insert(
+            self,
+            table: Union[Table, TableClause],
+            rows: List[Dict[str, Any]],
+            *,
+            multiinsert: bool = True,
+        ) -> None:
+            """Issue a "bulk insert" operation using the current
+            migration context.
+
+            This provides a means of representing an INSERT of multiple rows
+            which works equally well in the context of executing on a live
+            connection as well as that of generating a SQL script.   In the
+            case of a SQL script, the values are rendered inline into the
+            statement.
+
+            e.g.::
+
+                from alembic import op
+                from datetime import date
+                from sqlalchemy.sql import table, column
+                from sqlalchemy import String, Integer, Date
+
+                # Create an ad-hoc table to use for the insert statement.
+                accounts_table = table(
+                    "account",
+                    column("id", Integer),
+                    column("name", String),
+                    column("create_date", Date),
+                )
+
+                op.bulk_insert(
+                    accounts_table,
+                    [
+                        {
+                            "id": 1,
+                            "name": "John Smith",
+                            "create_date": date(2010, 10, 5),
+                        },
+                        {
+                            "id": 2,
+                            "name": "Ed Williams",
+                            "create_date": date(2007, 5, 27),
+                        },
+                        {
+                            "id": 3,
+                            "name": "Wendy Jones",
+                            "create_date": date(2008, 8, 15),
+                        },
+                    ],
+                )
+
+            When using --sql mode, some datatypes may not render inline
+            automatically, such as dates and other special types.   When this
+            issue is present, :meth:`.Operations.inline_literal` may be used::
+
+                op.bulk_insert(
+                    accounts_table,
+                    [
+                        {
+                            "id": 1,
+                            "name": "John Smith",
+                            "create_date": op.inline_literal("2010-10-05"),
+                        },
+                        {
+                            "id": 2,
+                            "name": "Ed Williams",
+                            "create_date": op.inline_literal("2007-05-27"),
+                        },
+                        {
+                            "id": 3,
+                            "name": "Wendy Jones",
+                            "create_date": op.inline_literal("2008-08-15"),
+                        },
+                    ],
+                    multiinsert=False,
+                )
+
+            When using :meth:`.Operations.inline_literal` in conjunction with
+            :meth:`.Operations.bulk_insert`, in order for the statement to work
+            in "online" (e.g. non --sql) mode, the
+            :paramref:`~.Operations.bulk_insert.multiinsert`
+            flag should be set to ``False``, which will have the effect of
+            individual INSERT statements being emitted to the database, each
+            with a distinct VALUES clause, so that the "inline" values can
+            still be rendered, rather than attempting to pass the values
+            as bound parameters.
+
+            :param table: a table object which represents the target of the INSERT.
+
+            :param rows: a list of dictionaries indicating rows.
+
+            :param multiinsert: when at its default of True and --sql mode is not
+               enabled, the INSERT statement will be executed using
+               "executemany()" style, where all elements in the list of
+               dictionaries are passed as bound parameters in a single
+               list.   Setting this to False results in individual INSERT
+               statements being emitted per parameter set, and is needed
+               in those cases where non-literal values are present in the
+               parameter sets.
+
+            """  # noqa: E501
+            ...
+
+        def create_check_constraint(
+            self,
+            constraint_name: Optional[str],
+            table_name: str,
+            condition: Union[str, ColumnElement[bool], TextClause],
+            *,
+            schema: Optional[str] = None,
+            **kw: Any,
+        ) -> None:
+            """Issue a "create check constraint" instruction using the
+            current migration context.
+
+            e.g.::
+
+                from alembic import op
+                from sqlalchemy.sql import column, func
+
+                op.create_check_constraint(
+                    "ck_user_name_len",
+                    "user",
+                    func.len(column("name")) > 5,
+                )
+
+            CHECK constraints are usually against a SQL expression, so ad-hoc
+            table metadata is usually needed.   The function will convert the given
+            arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
+            to an anonymous table in order to emit the CREATE statement.
+
+            :param constraint_name: Name of the check constraint.  The name
+             is necessary
+             so that an ALTER statement can be emitted.  For setups that
+             use an automated naming scheme such as that described at
+             :ref:`sqla:constraint_naming_conventions`,
+             ``name`` here can be ``None``, as the event listener will
+             apply the name to the constraint object when it is associated
+             with the table.
+            :param table_name: String name of the source table.
+            :param condition: SQL expression that's the condition of the
+             constraint. Can be a string or SQLAlchemy expression language
+             structure.
+            :param deferrable: optional bool. If set, emit DEFERRABLE or
+             NOT DEFERRABLE when issuing DDL for this constraint.
+            :param initially: optional string. If set, emit INITIALLY <value>
+             when issuing DDL for this constraint.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def create_exclude_constraint(
+            self,
+            constraint_name: str,
+            table_name: str,
+            *elements: Any,
+            **kw: Any,
+        ) -> Optional[Table]:
+            """Issue an alter to create an EXCLUDE constraint using the
+            current migration context.
+
+            .. note::  This method is Postgresql specific, and additionally
+               requires at least SQLAlchemy 1.0.
+
+            e.g.::
+
+                from alembic import op
+
+                op.create_exclude_constraint(
+                    "user_excl",
+                    "user",
+                    ("period", "&&"),
+                    ("group", "="),
+                    where=("group != 'some group'"),
+                )
+
+            Note that the expressions work the same way as that of
+            the ``ExcludeConstraint`` object itself; if plain strings are
+            passed, quoting rules must be applied manually.
+
+            :param constraint_name: Name of the constraint.
+            :param table_name: String name of the source table.
+            :param elements: exclude conditions.
+            :param where: SQL expression or SQL string with optional WHERE
+             clause.
+            :param deferrable: optional bool. If set, emit DEFERRABLE or
+             NOT DEFERRABLE when issuing DDL for this constraint.
+            :param initially: optional string. If set, emit INITIALLY <value>
+             when issuing DDL for this constraint.
+            :param schema: Optional schema name to operate within.
+
+            """  # noqa: E501
+            ...
+
+        def create_foreign_key(
+            self,
+            constraint_name: Optional[str],
+            source_table: str,
+            referent_table: str,
+            local_cols: List[str],
+            remote_cols: List[str],
+            *,
+            onupdate: Optional[str] = None,
+            ondelete: Optional[str] = None,
+            deferrable: Optional[bool] = None,
+            initially: Optional[str] = None,
+            match: Optional[str] = None,
+            source_schema: Optional[str] = None,
+            referent_schema: Optional[str] = None,
+            **dialect_kw: Any,
+        ) -> None:
+            """Issue a "create foreign key" instruction using the
+            current migration context.
+
+            e.g.::
+
+                from alembic import op
+
+                op.create_foreign_key(
+                    "fk_user_address",
+                    "address",
+                    "user",
+                    ["user_id"],
+                    ["id"],
+                )
+
+            This internally generates a :class:`~sqlalchemy.schema.Table` object
+            containing the necessary columns, then generates a new
+            :class:`~sqlalchemy.schema.ForeignKeyConstraint`
+            object which it then associates with the
+            :class:`~sqlalchemy.schema.Table`.
+            Any event listeners associated with this action will be fired
+            off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+            construct is ultimately used to generate the ALTER statement.
+
+            :param constraint_name: Name of the foreign key constraint.  The name
+             is necessary so that an ALTER statement can be emitted.  For setups
+             that use an automated naming scheme such as that described at
+             :ref:`sqla:constraint_naming_conventions`,
+             ``name`` here can be ``None``, as the event listener will
+             apply the name to the constraint object when it is associated
+             with the table.
+            :param source_table: String name of the source table.
+            :param referent_table: String name of the destination table.
+            :param local_cols: a list of string column names in the
+             source table.
+            :param remote_cols: a list of string column names in the
+             remote table.
+            :param onupdate: Optional string. If set, emit ON UPDATE <value> when
+             issuing DDL for this constraint. Typical values include CASCADE,
+             SET NULL and RESTRICT.
+            :param ondelete: Optional string. If set, emit ON DELETE <value> when
+             issuing DDL for this constraint. Typical values include CASCADE,
+             SET NULL and RESTRICT.
+            :param deferrable: optional bool. If set, emit DEFERRABLE or NOT
+             DEFERRABLE when issuing DDL for this constraint.
+            :param source_schema: Optional schema name of the source table.
+            :param referent_schema: Optional schema name of the destination table.
+
+            """  # noqa: E501
+            ...
+
+        def create_index(
+            self,
+            index_name: Optional[str],
+            table_name: str,
+            columns: Sequence[Union[str, TextClause, Function[Any]]],
+            *,
+            schema: Optional[str] = None,
+            unique: bool = False,
+            if_not_exists: Optional[bool] = None,
+            **kw: Any,
+        ) -> None:
+            r"""Issue a "create index" instruction using the current
+            migration context.
+
+            e.g.::
+
+                from alembic import op
+
+                op.create_index("ik_test", "t1", ["foo", "bar"])
+
+            Functional indexes can be produced by using the
+            :func:`sqlalchemy.sql.expression.text` construct::
+
+                from alembic import op
+                from sqlalchemy import text
+
+                op.create_index("ik_test", "t1", [text("lower(foo)")])
+
+            :param index_name: name of the index.
+            :param table_name: name of the owning table.
+            :param columns: a list consisting of string column names and/or
+             :func:`~sqlalchemy.sql.expression.text` constructs.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param unique: If True, create a unique index.
+
+            :param quote: Force quoting of this column's name on or off,
+             corresponding to ``True`` or ``False``. When left at its default
+             of ``None``, the column identifier will be quoted according to
+             whether the name is case sensitive (identifiers with at least one
+             upper case character are treated as case sensitive), or if it's a
+             reserved word. This flag is only needed to force quoting of a
+             reserved word which is not known by the SQLAlchemy dialect.
+
+            :param if_not_exists: If True, adds IF NOT EXISTS operator when
+             creating the new index.
+
+             .. versionadded:: 1.12.0
+
+            :param \**kw: Additional keyword arguments not mentioned above are
+             dialect specific, and passed in the form
+             ``<dialectname>_<argname>``.
+             See the documentation regarding an individual dialect at
+             :ref:`dialect_toplevel` for detail on documented arguments.
+
+            """  # noqa: E501
+            ...
+
+        def create_primary_key(
+            self,
+            constraint_name: Optional[str],
+            table_name: str,
+            columns: List[str],
+            *,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Issue a "create primary key" instruction using the current
+            migration context.
+
+            e.g.::
+
+                from alembic import op
+
+                op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
+
+            This internally generates a :class:`~sqlalchemy.schema.Table` object
+            containing the necessary columns, then generates a new
+            :class:`~sqlalchemy.schema.PrimaryKeyConstraint`
+            object which it then associates with the
+            :class:`~sqlalchemy.schema.Table`.
+            Any event listeners associated with this action will be fired
+            off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+            construct is ultimately used to generate the ALTER statement.
+
+            :param constraint_name: Name of the primary key constraint.  The name
+             is necessary so that an ALTER statement can be emitted.  For setups
+             that use an automated naming scheme such as that described at
+             :ref:`sqla:constraint_naming_conventions`,
+             ``name`` here can be ``None``, as the event listener will
+             apply the name to the constraint object when it is associated
+             with the table.
+            :param table_name: String name of the target table.
+            :param columns: a list of string column names to be applied to the
+             primary key constraint.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def create_table(
+            self, table_name: str, *columns: SchemaItem, **kw: Any
+        ) -> Table:
+            r"""Issue a "create table" instruction using the current migration
+            context.
+
+            This directive receives an argument list similar to that of the
+            traditional :class:`sqlalchemy.schema.Table` construct, but without the
+            metadata::
+
+                from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, TIMESTAMP
+                from sqlalchemy import Column, func
+                from alembic import op
+
+                op.create_table(
+                    "account",
+                    Column("id", INTEGER, primary_key=True),
+                    Column("name", VARCHAR(50), nullable=False),
+                    Column("description", NVARCHAR(200)),
+                    Column("timestamp", TIMESTAMP, server_default=func.now()),
+                )
+
+            Note that :meth:`.create_table` accepts
+            :class:`~sqlalchemy.schema.Column`
+            constructs directly from the SQLAlchemy library.  In particular,
+            default values to be created on the database side are
+            specified using the ``server_default`` parameter, and not
+            ``default``, which only specifies Python-side defaults::
+
+                from alembic import op
+                from sqlalchemy import Column, TIMESTAMP, func
+
+                # specify "DEFAULT NOW" along with the "timestamp" column
+                op.create_table(
+                    "account",
+                    Column("id", INTEGER, primary_key=True),
+                    Column("timestamp", TIMESTAMP, server_default=func.now()),
+                )
+
+            The function also returns a newly created
+            :class:`~sqlalchemy.schema.Table` object, corresponding to the table
+            specification given, which is suitable for
+            immediate SQL operations, in particular
+            :meth:`.Operations.bulk_insert`::
+
+                from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, TIMESTAMP
+                from sqlalchemy import Column, func
+                from alembic import op
+
+                account_table = op.create_table(
+                    "account",
+                    Column("id", INTEGER, primary_key=True),
+                    Column("name", VARCHAR(50), nullable=False),
+                    Column("description", NVARCHAR(200)),
+                    Column("timestamp", TIMESTAMP, server_default=func.now()),
+                )
+
+                op.bulk_insert(
+                    account_table,
+                    [
+                        {"name": "A1", "description": "account 1"},
+                        {"name": "A2", "description": "account 2"},
+                    ],
+                )
+
+            :param table_name: Name of the table
+            :param \*columns: collection of :class:`~sqlalchemy.schema.Column`
+             objects within
+             the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
+             objects
+             and :class:`~sqlalchemy.schema.Index` objects.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param \**kw: Other keyword arguments are passed to the underlying
+             :class:`sqlalchemy.schema.Table` object created for the command.
+
+            :return: the :class:`~sqlalchemy.schema.Table` object corresponding
+             to the parameters given.
+
+            """  # noqa: E501
+            ...
+
+        def create_table_comment(
+            self,
+            table_name: str,
+            comment: Optional[str],
+            *,
+            existing_comment: Optional[str] = None,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Emit a COMMENT ON operation to set the comment for a table.
+
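+            E.g., an illustrative sketch (the table name and comment text
+            are hypothetical)::
+
+                op.create_table_comment(
+                    "account",  # hypothetical table name
+                    "Customer account records",
+                )
+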
+            :param table_name: string name of the target table.
+            :param comment: string value of the comment being registered against
+             the specified table.
+            :param existing_comment: String value of a comment
+             already registered on the specified table, used within autogenerate
+             so that the operation is reversible, but not required for direct
+             use.
+
+            .. seealso::
+
+                :meth:`.Operations.drop_table_comment`
+
+                :paramref:`.Operations.alter_column.comment`
+
+            """  # noqa: E501
+            ...
+
+        def create_unique_constraint(
+            self,
+            constraint_name: Optional[str],
+            table_name: str,
+            columns: Sequence[str],
+            *,
+            schema: Optional[str] = None,
+            **kw: Any,
+        ) -> Any:
+            """Issue a "create unique constraint" instruction using the
+            current migration context.
+
+            e.g.::
+
+                from alembic import op
+                op.create_unique_constraint("uq_user_name", "user", ["name"])
+
+            This internally generates a :class:`~sqlalchemy.schema.Table` object
+            containing the necessary columns, then generates a new
+            :class:`~sqlalchemy.schema.UniqueConstraint`
+            object which it then associates with the
+            :class:`~sqlalchemy.schema.Table`.
+            Any event listeners associated with this action will be fired
+            off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+            construct is ultimately used to generate the ALTER statement.
+
+            :param constraint_name: Name of the unique constraint.  The name
+             is necessary
+             so that an ALTER statement can be emitted.  For setups that
+             use an automated naming scheme such as that described at
+             :ref:`sqla:constraint_naming_conventions`,
+             ``name`` here can be ``None``, as the event listener will
+             apply the name to the constraint object when it is associated
+             with the table.
+            :param table_name: String name of the source table.
+            :param columns: a list of string column names in the
+             source table.
+            :param deferrable: optional bool. If set, emit DEFERRABLE or
+             NOT DEFERRABLE when issuing DDL for this constraint.
+            :param initially: optional string. If set, emit INITIALLY <value>
+             when issuing DDL for this constraint.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def drop_column(
+            self,
+            table_name: str,
+            column_name: str,
+            *,
+            schema: Optional[str] = None,
+            **kw: Any,
+        ) -> None:
+            """Issue a "drop column" instruction using the current
+            migration context.
+
+            e.g.::
+
+                drop_column("organization", "account_id")
+
+            :param table_name: name of table
+            :param column_name: name of column
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param mssql_drop_check: Optional boolean.  When ``True``, on
+             Microsoft SQL Server only, first
+             drop the CHECK constraint on the column using a
+             SQL-script-compatible
+             block that selects into a @variable from sys.check_constraints,
+             then exec's a separate DROP CONSTRAINT for that constraint.
+            :param mssql_drop_default: Optional boolean.  When ``True``, on
+             Microsoft SQL Server only, first
+             drop the DEFAULT constraint on the column using a
+             SQL-script-compatible
+             block that selects into a @variable from sys.default_constraints,
+             then exec's a separate DROP CONSTRAINT for that default.
+            :param mssql_drop_foreign_key: Optional boolean.  When ``True``, on
+             Microsoft SQL Server only, first
+             drop a single FOREIGN KEY constraint on the column using a
+             SQL-script-compatible
+             block that selects into a @variable from
+             sys.foreign_keys/sys.foreign_key_columns,
+             then exec's a separate DROP CONSTRAINT for that foreign key.  Only
+             works if the column has exactly one FK constraint which refers to
+             it, at the moment.
+
+            """  # noqa: E501
+            ...
+
+        def drop_constraint(
+            self,
+            constraint_name: str,
+            table_name: str,
+            type_: Optional[str] = None,
+            *,
+            schema: Optional[str] = None,
+        ) -> None:
+            r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
+
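+            E.g., an illustrative sketch (constraint and table names are
+            hypothetical; ``type_`` is needed on MySQL)::
+
+                # "fk_user_address" / "address" are hypothetical names
+                op.drop_constraint("fk_user_address", "address", type_="foreignkey")
+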
+            :param constraint_name: name of the constraint.
+            :param table_name: table name.
+            :param type\_: optional, required on MySQL.  can be
+             'foreignkey', 'primary', 'unique', or 'check'.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        def drop_index(
+            self,
+            index_name: str,
+            table_name: Optional[str] = None,
+            *,
+            schema: Optional[str] = None,
+            if_exists: Optional[bool] = None,
+            **kw: Any,
+        ) -> None:
+            r"""Issue a "drop index" instruction using the current
+            migration context.
+
+            e.g.::
+
+                drop_index("accounts")
+
+            :param index_name: name of the index.
+            :param table_name: name of the owning table.  Some
+             backends such as Microsoft SQL Server require this.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            :param if_exists: If True, adds IF EXISTS operator when
+             dropping the index.
+
+             .. versionadded:: 1.12.0
+
+            :param \**kw: Additional keyword arguments not mentioned above are
+             dialect specific, and passed in the form
+             ``<dialectname>_<argname>``.
+             See the documentation regarding an individual dialect at
+             :ref:`dialect_toplevel` for detail on documented arguments.
+
+            """  # noqa: E501
+            ...
+
+        def drop_table(
+            self, table_name: str, *, schema: Optional[str] = None, **kw: Any
+        ) -> None:
+            r"""Issue a "drop table" instruction using the current
+            migration context.
+
+            e.g.::
+
+                drop_table("accounts")
+
+            :param table_name: Name of the table
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+            :param \**kw: Other keyword arguments are passed to the underlying
+             :class:`sqlalchemy.schema.Table` object created for the command.
+
+            """  # noqa: E501
+            ...
+
+        def drop_table_comment(
+            self,
+            table_name: str,
+            *,
+            existing_comment: Optional[str] = None,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Issue a "drop table comment" operation to
+            remove an existing comment set on a table.
+
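+            E.g., an illustrative sketch (names are hypothetical)::
+
+                op.drop_table_comment(
+                    "account",  # hypothetical table name
+                    existing_comment="Customer account records",
+                )
+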
+            :param table_name: string name of the target table.
+            :param existing_comment: An optional string value of a comment already
+             registered on the specified table.
+
+            .. seealso::
+
+                :meth:`.Operations.create_table_comment`
+
+                :paramref:`.Operations.alter_column.comment`
+
+            """  # noqa: E501
+            ...
+
+        def execute(
+            self,
+            sqltext: Union[Executable, str],
+            *,
+            execution_options: Optional[dict[str, Any]] = None,
+        ) -> None:
+            r"""Execute the given SQL using the current migration context.
+
+            The given SQL can be a plain string, e.g.::
+
+                op.execute("INSERT INTO table (foo) VALUES ('some value')")
+
+            Or it can be any kind of Core SQL Expression construct, such as
+            below where we use an update construct::
+
+                from sqlalchemy.sql import table, column
+                from sqlalchemy import String
+                from alembic import op
+
+                account = table("account", column("name", String))
+                op.execute(
+                    account.update()
+                    .where(account.c.name == op.inline_literal("account 1"))
+                    .values({"name": op.inline_literal("account 2")})
+                )
+
+            Above, we made use of the SQLAlchemy
+            :func:`sqlalchemy.sql.expression.table` and
+            :func:`sqlalchemy.sql.expression.column` constructs to make a brief,
+            ad-hoc table construct just for our UPDATE statement.  A full
+            :class:`~sqlalchemy.schema.Table` construct of course works perfectly
+            fine as well, though note it's a recommended practice to at least
+            ensure the definition of a table is self-contained within the migration
+            script, rather than imported from a module that may break compatibility
+            with older migrations.
+
+            In a SQL script context, the statement is emitted directly to the
+            output stream.   There is *no* return result, however, as this
+            function is oriented towards generating a change script
+            that can run in "offline" mode.     Additionally, parameterized
+            statements are discouraged here, as they *will not work* in offline
+            mode.  Above, we use :meth:`.inline_literal` where parameters are
+            to be used.
+
+            For full interaction with a connected database where parameters can
+            also be used normally, use the "bind" available from the context::
+
+                from alembic import op
+
+                connection = op.get_bind()
+
+                connection.execute(
+                    account.update()
+                    .where(account.c.name == "account 1")
+                    .values({"name": "account 2"})
+                )
+
+            Additionally, when passing the statement as a plain string, it is first
+            coerced into a :func:`sqlalchemy.sql.expression.text` construct
+            before being passed along.  In the less likely case that the
+            literal SQL string contains a colon, it must be escaped with a
+            backslash, as::
+
+               op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
+
+
+            :param sqltext: Any legal SQLAlchemy expression, including:
+
+            * a string
+            * a :func:`sqlalchemy.sql.expression.text` construct.
+            * a :func:`sqlalchemy.sql.expression.insert` construct.
+            * a :func:`sqlalchemy.sql.expression.update` construct.
+            * a :func:`sqlalchemy.sql.expression.delete` construct.
+            * Any "executable" described in SQLAlchemy Core documentation,
+              noting that no result set is returned.
+
+            .. note::  when passing a plain string, the statement is coerced into
+               a :func:`sqlalchemy.sql.expression.text` construct. This construct
+               considers symbols with colons, e.g. ``:foo`` to be bound parameters.
+               To avoid this, ensure that colon symbols are escaped, e.g.
+               ``\:foo``.
+
+            :param execution_options: Optional dictionary of
+             execution options, will be passed to
+             :meth:`sqlalchemy.engine.Connection.execution_options`.
+            """  # noqa: E501
+            ...
+
+        def rename_table(
+            self,
+            old_table_name: str,
+            new_table_name: str,
+            *,
+            schema: Optional[str] = None,
+        ) -> None:
+            """Emit an ALTER TABLE to rename a table.
+
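+            E.g., an illustrative sketch (both names are hypothetical)::
+
+                op.rename_table("user", "app_user")
+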
+            :param old_table_name: old name.
+            :param new_table_name: new name.
+            :param schema: Optional schema name to operate within.  To control
+             quoting of the schema outside of the default behavior, use
+             the SQLAlchemy construct
+             :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+            """  # noqa: E501
+            ...
+
+        # END STUB FUNCTIONS: op_cls
+
+
+class BatchOperations(AbstractOperations):
+    """Modifies the interface :class:`.Operations` for batch mode.
+
+    This omits the ``table_name`` and ``schema`` parameters
+    from associated methods, as these are a given when running under batch
+    mode.
+
+    .. seealso::
+
+        :meth:`.Operations.batch_alter_table`
+
+    Note that as of 0.8, most of the methods on this class are produced
+    dynamically using the :meth:`.Operations.register_operation`
+    method.
+
+    """
+
+    impl: BatchOperationsImpl
+
+    def _noop(self, operation: Any) -> NoReturn:
+        raise NotImplementedError(
+            "The %s method does not apply to a batch table alter operation."
+            % operation
+        )
+
+    if TYPE_CHECKING:
+        # START STUB FUNCTIONS: batch_op
+        # ### the following stubs are generated by tools/write_pyi.py ###
+        # ### do not edit ###
+
+        def add_column(
+            self,
+            column: Column[Any],
+            *,
+            insert_before: Optional[str] = None,
+            insert_after: Optional[str] = None,
+        ) -> None:
+            """Issue an "add column" instruction using the current
+            batch migration context.
+
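+            E.g., an illustrative sketch (table and column names are
+            hypothetical)::
+
+                from sqlalchemy import Column, Integer
+
+                with op.batch_alter_table("account") as batch_op:
+                    batch_op.add_column(
+                        Column("points", Integer),
+                        insert_after="name",  # hypothetical existing column
+                    )
+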
+            .. seealso::
+
+                :meth:`.Operations.add_column`
+
+            """  # noqa: E501
+            ...
+
+        def alter_column(
+            self,
+            column_name: str,
+            *,
+            nullable: Optional[bool] = None,
+            comment: Union[str, Literal[False], None] = False,
+            server_default: Any = False,
+            new_column_name: Optional[str] = None,
+            type_: Union[TypeEngine[Any], Type[TypeEngine[Any]], None] = None,
+            existing_type: Union[
+                TypeEngine[Any], Type[TypeEngine[Any]], None
+            ] = None,
+            existing_server_default: Union[
+                str, bool, Identity, Computed, None
+            ] = False,
+            existing_nullable: Optional[bool] = None,
+            existing_comment: Optional[str] = None,
+            insert_before: Optional[str] = None,
+            insert_after: Optional[str] = None,
+            **kw: Any,
+        ) -> None:
+            """Issue an "alter column" instruction using the current
+            batch migration context.
+
+            Parameters are the same as that of :meth:`.Operations.alter_column`,
+            as well as the following option(s):
+
+            :param insert_before: String name of an existing column which this
+             column should be placed before, when creating the new table.
+
+            :param insert_after: String name of an existing column which this
+             column should be placed after, when creating the new table.  If
+             both :paramref:`.BatchOperations.alter_column.insert_before`
+             and :paramref:`.BatchOperations.alter_column.insert_after` are
+             omitted, the column is inserted after the last existing column
+             in the table.
+
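+            For example, an illustrative sketch (table and column names
+            here are hypothetical)::
+
+                from sqlalchemy import String
+
+                with op.batch_alter_table("account") as batch_op:
+                    batch_op.alter_column(
+                        "user_name",  # hypothetical column being renamed
+                        new_column_name="username",
+                        existing_type=String(50),
+                    )
+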
+            .. seealso::
+
+                :meth:`.Operations.alter_column`
+
+
+            """  # noqa: E501
+            ...
+
+        def create_check_constraint(
+            self,
+            constraint_name: str,
+            condition: Union[str, ColumnElement[bool], TextClause],
+            **kw: Any,
+        ) -> None:
+            """Issue a "create check constraint" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``table_name`` and ``schema``
+            arguments from the call.
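+
+            For example (names are illustrative)::
+
+                with op.batch_alter_table("account") as batch_op:
+                    batch_op.create_check_constraint(
+                        "ck_account_balance_positive", "balance >= 0"
+                    )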
+
+            .. seealso::
+
+                :meth:`.Operations.create_check_constraint`
+
+            """  # noqa: E501
+            ...
+
+        def create_exclude_constraint(
+            self, constraint_name: str, *elements: Any, **kw: Any
+        ) -> Optional[Table]:
+            """Issue a "create exclude constraint" instruction using the
+            current batch migration context.
+
+            .. note::  This method is PostgreSQL specific, and additionally
+               requires at least SQLAlchemy 1.0.
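+
+            For example (names and operators are illustrative)::
+
+                with op.batch_alter_table("room_booking") as batch_op:
+                    batch_op.create_exclude_constraint(
+                        "uq_room_period", ("room", "="), ("period", "&&")
+                    )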
+
+            .. seealso::
+
+                :meth:`.Operations.create_exclude_constraint`
+
+            """  # noqa: E501
+            ...
+
+        def create_foreign_key(
+            self,
+            constraint_name: str,
+            referent_table: str,
+            local_cols: List[str],
+            remote_cols: List[str],
+            *,
+            referent_schema: Optional[str] = None,
+            onupdate: Optional[str] = None,
+            ondelete: Optional[str] = None,
+            deferrable: Optional[bool] = None,
+            initially: Optional[str] = None,
+            match: Optional[str] = None,
+            **dialect_kw: Any,
+        ) -> None:
+            """Issue a "create foreign key" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``source_table`` and
+            ``source_schema`` arguments from the call.
+
+            e.g.::
+
+                with op.batch_alter_table("address") as batch_op:
+                    batch_op.create_foreign_key(
+                        "fk_user_address",
+                        "user",
+                        ["user_id"],
+                        ["id"],
+                    )
+
+            .. seealso::
+
+                :meth:`.Operations.create_foreign_key`
+
+            """  # noqa: E501
+            ...
+
+        def create_index(
+            self, index_name: str, columns: List[str], **kw: Any
+        ) -> None:
+            """Issue a "create index" instruction using the
+            current batch migration context.
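+
+            For example (names are illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.create_index("ix_user_email", ["email"])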
+
+            .. seealso::
+
+                :meth:`.Operations.create_index`
+
+            """  # noqa: E501
+            ...
+
+        def create_primary_key(
+            self, constraint_name: str, columns: List[str]
+        ) -> None:
+            """Issue a "create primary key" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``table_name`` and ``schema``
+            arguments from the call.
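+
+            For example (names are illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.create_primary_key("pk_user", ["id"])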
+
+            .. seealso::
+
+                :meth:`.Operations.create_primary_key`
+
+            """  # noqa: E501
+            ...
+
+        def create_table_comment(
+            self,
+            comment: Optional[str],
+            *,
+            existing_comment: Optional[str] = None,
+        ) -> None:
+            """Emit a COMMENT ON operation to set the comment for a table
+            using the current batch migration context.
+
+            :param comment: string value of the comment being registered against
+             the specified table.
+            :param existing_comment: String value of a comment
+             already registered on the specified table, used within autogenerate
+             so that the operation is reversible, but not required for direct
+             use.
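+
+            For example (names and comment text are illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.create_table_comment(
+                        "application user accounts"
+                    )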
+
+            """  # noqa: E501
+            ...
+
+        def create_unique_constraint(
+            self, constraint_name: str, columns: Sequence[str], **kw: Any
+        ) -> Any:
+            """Issue a "create unique constraint" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``table_name`` and ``schema``
+            arguments from the call.
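+
+            For example (names are illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.create_unique_constraint(
+                        "uq_user_email", ["email"]
+                    )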
+
+            .. seealso::
+
+                :meth:`.Operations.create_unique_constraint`
+
+            """  # noqa: E501
+            ...
+
+        def drop_column(self, column_name: str, **kw: Any) -> None:
+            """Issue a "drop column" instruction using the current
+            batch migration context.
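+
+            For example (names are illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.drop_column("legacy_flag")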
+
+            .. seealso::
+
+                :meth:`.Operations.drop_column`
+
+            """  # noqa: E501
+            ...
+
+        def drop_constraint(
+            self, constraint_name: str, type_: Optional[str] = None
+        ) -> None:
+            """Issue a "drop constraint" instruction using the
+            current batch migration context.
+
+            The batch form of this call omits the ``table_name`` and ``schema``
+            arguments from the call.
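+
+            For example (names are illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.drop_constraint(
+                        "uq_user_email", type_="unique"
+                    )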
+
+            .. seealso::
+
+                :meth:`.Operations.drop_constraint`
+
+            """  # noqa: E501
+            ...
+
+        def drop_index(self, index_name: str, **kw: Any) -> None:
+            """Issue a "drop index" instruction using the
+            current batch migration context.
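+
+            For example (names are illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.drop_index("ix_user_email")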
+
+            .. seealso::
+
+                :meth:`.Operations.drop_index`
+
+            """  # noqa: E501
+            ...
+
+        def drop_table_comment(
+            self, *, existing_comment: Optional[str] = None
+        ) -> None:
+            """Issue a "drop table comment" operation to
+            remove an existing comment set on a table using the current
+            batch operations context.
+
+            :param existing_comment: An optional string value of a comment already
+             registered on the specified table.
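+
+            For example (comment text is illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.drop_table_comment(
+                        existing_comment="application user accounts"
+                    )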
+
+            """  # noqa: E501
+            ...
+
+        def execute(
+            self,
+            sqltext: Union[Executable, str],
+            *,
+            execution_options: Optional[dict[str, Any]] = None,
+        ) -> None:
+            """Execute the given SQL using the current migration context.
+
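+            For example (SQL text is illustrative)::
+
+                with op.batch_alter_table("user") as batch_op:
+                    batch_op.execute("UPDATE user SET active = 1")
+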
+            .. seealso::
+
+                :meth:`.Operations.execute`
+
+            """  # noqa: E501
+            ...
+
+        # END STUB FUNCTIONS: batch_op
diff --git a/venv/Lib/site-packages/alembic/operations/batch.py b/venv/Lib/site-packages/alembic/operations/batch.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd7ab990306aff0501b39ea96fde31b81d20ff94
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/operations/batch.py
@@ -0,0 +1,717 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import CheckConstraint
+from sqlalchemy import Column
+from sqlalchemy import ForeignKeyConstraint
+from sqlalchemy import Index
+from sqlalchemy import MetaData
+from sqlalchemy import PrimaryKeyConstraint
+from sqlalchemy import schema as sql_schema
+from sqlalchemy import Table
+from sqlalchemy import types as sqltypes
+from sqlalchemy.sql.schema import SchemaEventTarget
+from sqlalchemy.util import OrderedDict
+from sqlalchemy.util import topological
+
+from ..util import exc
+from ..util.sqla_compat import _columns_for_constraint
+from ..util.sqla_compat import _copy
+from ..util.sqla_compat import _copy_expression
+from ..util.sqla_compat import _ensure_scope_for_ddl
+from ..util.sqla_compat import _fk_is_self_referential
+from ..util.sqla_compat import _idx_table_bound_expressions
+from ..util.sqla_compat import _insert_inline
+from ..util.sqla_compat import _is_type_bound
+from ..util.sqla_compat import _remove_column_from_collection
+from ..util.sqla_compat import _resolve_for_variant
+from ..util.sqla_compat import _select
+from ..util.sqla_compat import constraint_name_defined
+from ..util.sqla_compat import constraint_name_string
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.sql.elements import ColumnClause
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.functions import Function
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from ..ddl.impl import DefaultImpl
+
+
+class BatchOperationsImpl:
+    def __init__(
+        self,
+        operations,
+        table_name,
+        schema,
+        recreate,
+        copy_from,
+        table_args,
+        table_kwargs,
+        reflect_args,
+        reflect_kwargs,
+        naming_convention,
+        partial_reordering,
+    ):
+        self.operations = operations
+        self.table_name = table_name
+        self.schema = schema
+        if recreate not in ("auto", "always", "never"):
+            raise ValueError(
+                "recreate may be one of 'auto', 'always', or 'never'."
+            )
+        self.recreate = recreate
+        self.copy_from = copy_from
+        self.table_args = table_args
+        self.table_kwargs = dict(table_kwargs)
+        self.reflect_args = reflect_args
+        self.reflect_kwargs = dict(reflect_kwargs)
+        self.reflect_kwargs.setdefault(
+            "listeners", list(self.reflect_kwargs.get("listeners", ()))
+        )
+        self.reflect_kwargs["listeners"].append(
+            ("column_reflect", operations.impl.autogen_column_reflect)
+        )
+        self.naming_convention = naming_convention
+        self.partial_reordering = partial_reordering
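+        # operations are queued as (opname, args, kwargs) tuples and are
+        # replayed against the appropriate impl in flush()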
+        self.batch = []
+
+    @property
+    def dialect(self) -> Dialect:
+        return self.operations.impl.dialect
+
+    @property
+    def impl(self) -> DefaultImpl:
+        return self.operations.impl
+
+    def _should_recreate(self) -> bool:
+        if self.recreate == "auto":
+            return self.operations.impl.requires_recreate_in_batch(self)
+        elif self.recreate == "always":
+            return True
+        else:
+            return False
+
+    def flush(self) -> None:
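+        # two strategies: replay the queued operations directly as ALTER
+        # statements, or, when the dialect requires it (e.g. SQLite),
+        # rebuild the table via "move and copy" using ApplyBatchImpl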
+        should_recreate = self._should_recreate()
+
+        with _ensure_scope_for_ddl(self.impl.connection):
+            if not should_recreate:
+                for opname, arg, kw in self.batch:
+                    fn = getattr(self.operations.impl, opname)
+                    fn(*arg, **kw)
+            else:
+                if self.naming_convention:
+                    m1 = MetaData(naming_convention=self.naming_convention)
+                else:
+                    m1 = MetaData()
+
+                if self.copy_from is not None:
+                    existing_table = self.copy_from
+                    reflected = False
+                else:
+                    if self.operations.migration_context.as_sql:
+                        raise exc.CommandError(
+                            f"This operation cannot proceed in --sql mode; "
+                            f"batch mode with dialect "
+                            f"{self.operations.migration_context.dialect.name} "  # noqa: E501
+                            f"requires a live database connection with which "
+                            f'to reflect the table "{self.table_name}". '
+                            f"To generate a batch SQL migration script using "
+                            "table "
+                            '"move and copy", a complete Table object '
+                            f'should be passed to the "copy_from" argument '
+                            "of the batch_alter_table() method so that table "
+                            "reflection can be skipped."
+                        )
+
+                    existing_table = Table(
+                        self.table_name,
+                        m1,
+                        schema=self.schema,
+                        autoload_with=self.operations.get_bind(),
+                        *self.reflect_args,
+                        **self.reflect_kwargs,
+                    )
+                    reflected = True
+
+                batch_impl = ApplyBatchImpl(
+                    self.impl,
+                    existing_table,
+                    self.table_args,
+                    self.table_kwargs,
+                    reflected,
+                    partial_reordering=self.partial_reordering,
+                )
+                for opname, arg, kw in self.batch:
+                    fn = getattr(batch_impl, opname)
+                    fn(*arg, **kw)
+
+                batch_impl._create(self.impl)
+
+    def alter_column(self, *arg, **kw) -> None:
+        self.batch.append(("alter_column", arg, kw))
+
+    def add_column(self, *arg, **kw) -> None:
+        if (
+            "insert_before" in kw or "insert_after" in kw
+        ) and not self._should_recreate():
+            raise exc.CommandError(
+                "Can't specify insert_before or insert_after when using "
+                "ALTER; please specify recreate='always'"
+            )
+        self.batch.append(("add_column", arg, kw))
+
+    def drop_column(self, *arg, **kw) -> None:
+        self.batch.append(("drop_column", arg, kw))
+
+    def add_constraint(self, const: Constraint) -> None:
+        self.batch.append(("add_constraint", (const,), {}))
+
+    def drop_constraint(self, const: Constraint) -> None:
+        self.batch.append(("drop_constraint", (const,), {}))
+
+    def rename_table(self, *arg, **kw):
+        self.batch.append(("rename_table", arg, kw))
+
+    def create_index(self, idx: Index, **kw: Any) -> None:
+        self.batch.append(("create_index", (idx,), kw))
+
+    def drop_index(self, idx: Index, **kw: Any) -> None:
+        self.batch.append(("drop_index", (idx,), kw))
+
+    def create_table_comment(self, table):
+        self.batch.append(("create_table_comment", (table,), {}))
+
+    def drop_table_comment(self, table):
+        self.batch.append(("drop_table_comment", (table,), {}))
+
+    def create_table(self, table):
+        raise NotImplementedError("Can't create table in batch mode")
+
+    def drop_table(self, table):
+        raise NotImplementedError("Can't drop table in batch mode")
+
+    def create_column_comment(self, column):
+        self.batch.append(("create_column_comment", (column,), {}))
+
+
+class ApplyBatchImpl:
+    def __init__(
+        self,
+        impl: DefaultImpl,
+        table: Table,
+        table_args: tuple,
+        table_kwargs: Dict[str, Any],
+        reflected: bool,
+        partial_reordering: tuple = (),
+    ) -> None:
+        self.impl = impl
+        self.table = table  # this is a Table object
+        self.table_args = table_args
+        self.table_kwargs = table_kwargs
+        self.temp_table_name = self._calc_temp_name(table.name)
+        self.new_table: Optional[Table] = None
+
+        self.partial_reordering = partial_reordering  # tuple of tuples
+        self.add_col_ordering: Tuple[
+            Tuple[str, str], ...
+        ] = ()  # tuple of tuples
+
+        self.column_transfers = OrderedDict(
+            (c.name, {"expr": c}) for c in self.table.c
+        )
+        self.existing_ordering = list(self.column_transfers)
+
+        self.reflected = reflected
+        self._grab_table_elements()
+
+    @classmethod
+    def _calc_temp_name(cls, tablename: Union[quoted_name, str]) -> str:
+        return ("_alembic_tmp_%s" % tablename)[0:50]
+
+    def _grab_table_elements(self) -> None:
+        schema = self.table.schema
+        self.columns: Dict[str, Column[Any]] = OrderedDict()
+        for c in self.table.c:
+            c_copy = _copy(c, schema=schema)
+            c_copy.unique = c_copy.index = False
+            # ensure that the type object was copied,
+            # as we may need to modify it in-place
+            if isinstance(c.type, SchemaEventTarget):
+                assert c_copy.type is not c.type
+            self.columns[c.name] = c_copy
+        self.named_constraints: Dict[str, Constraint] = {}
+        self.unnamed_constraints = []
+        self.col_named_constraints = {}
+        self.indexes: Dict[str, Index] = {}
+        self.new_indexes: Dict[str, Index] = {}
+
+        for const in self.table.constraints:
+            if _is_type_bound(const):
+                continue
+            elif (
+                self.reflected
+                and isinstance(const, CheckConstraint)
+                and not const.name
+            ):
+                # TODO: we are skipping unnamed reflected CheckConstraint
+                # because we have no way to determine _is_type_bound()
+                # for these.
+                pass
+            elif constraint_name_string(const.name):
+                self.named_constraints[const.name] = const
+            else:
+                self.unnamed_constraints.append(const)
+
+        if not self.reflected:
+            for col in self.table.c:
+                for const in col.constraints:
+                    if const.name:
+                        self.col_named_constraints[const.name] = (col, const)
+
+        for idx in self.table.indexes:
+            self.indexes[idx.name] = idx  # type: ignore[index]
+
+        for k in self.table.kwargs:
+            self.table_kwargs.setdefault(k, self.table.kwargs[k])
+
+    def _adjust_self_columns_for_partial_reordering(self) -> None:
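+        # express the desired column order as (before, after) pairs, then
+        # topologically sort those pairs into a total column ordering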
+        pairs = set()
+
+        col_by_idx = list(self.columns)
+
+        if self.partial_reordering:
+            for tuple_ in self.partial_reordering:
+                for index, elem in enumerate(tuple_):
+                    if index > 0:
+                        pairs.add((tuple_[index - 1], elem))
+        else:
+            for index, elem in enumerate(self.existing_ordering):
+                if index > 0:
+                    pairs.add((col_by_idx[index - 1], elem))
+
+        pairs.update(self.add_col_ordering)
+
+        # this can happen if some columns were dropped but not removed
+        # from existing_ordering.  that should already be prevented, but
+        # we conservatively discard self-referencing pairs here in case
+        pairs_list = [p for p in pairs if p[0] != p[1]]
+
+        sorted_ = list(
+            topological.sort(pairs_list, col_by_idx, deterministic_order=True)
+        )
+        self.columns = OrderedDict((k, self.columns[k]) for k in sorted_)
+        self.column_transfers = OrderedDict(
+            (k, self.column_transfers[k]) for k in sorted_
+        )
+
+    def _transfer_elements_to_new_table(self) -> None:
+        assert self.new_table is None, "Can only create new table once"
+
+        m = MetaData()
+        schema = self.table.schema
+
+        if self.partial_reordering or self.add_col_ordering:
+            self._adjust_self_columns_for_partial_reordering()
+
+        self.new_table = new_table = Table(
+            self.temp_table_name,
+            m,
+            *(list(self.columns.values()) + list(self.table_args)),
+            schema=schema,
+            **self.table_kwargs,
+        )
+
+        for const in (
+            list(self.named_constraints.values()) + self.unnamed_constraints
+        ):
+            const_columns = {c.key for c in _columns_for_constraint(const)}
+
+            if not const_columns.issubset(self.column_transfers):
+                continue
+
+            const_copy: Constraint
+            if isinstance(const, ForeignKeyConstraint):
+                if _fk_is_self_referential(const):
+                    # for a self-referential constraint, refer to the
+                    # *original* table name, not _alembic_batch_temp.
+                    # This is consistent with how we handle FK constraints
+                    # from other tables; we assume SQLite (with foreign key
+                    # enforcement off) keeps the referenced names unchanged,
+                    # so when we rename back, they match again.
+                    const_copy = _copy(
+                        const, schema=schema, target_table=self.table
+                    )
+                else:
+                    # "target_table" for ForeignKeyConstraint.copy() is
+                    # only used if the FK is detected as being
+                    # self-referential, which we are handling above.
+                    const_copy = _copy(const, schema=schema)
+            else:
+                const_copy = _copy(
+                    const, schema=schema, target_table=new_table
+                )
+            if isinstance(const, ForeignKeyConstraint):
+                self._setup_referent(m, const)
+            new_table.append_constraint(const_copy)
+
+    def _gather_indexes_from_both_tables(self) -> List[Index]:
+        assert self.new_table is not None
+        idx: List[Index] = []
+
+        for idx_existing in self.indexes.values():
+            # this is a lift-and-move from Table.to_metadata
+
+            if idx_existing._column_flag:
+                continue
+
+            idx_copy = Index(
+                idx_existing.name,
+                unique=idx_existing.unique,
+                *[
+                    _copy_expression(expr, self.new_table)
+                    for expr in _idx_table_bound_expressions(idx_existing)
+                ],
+                _table=self.new_table,
+                **idx_existing.kwargs,
+            )
+            idx.append(idx_copy)
+
+        for index in self.new_indexes.values():
+            idx.append(
+                Index(
+                    index.name,
+                    unique=index.unique,
+                    *[self.new_table.c[col] for col in index.columns.keys()],
+                    **index.kwargs,
+                )
+            )
+        return idx
+
+    def _setup_referent(
+        self, metadata: MetaData, constraint: ForeignKeyConstraint
+    ) -> None:
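+        # build a stub of the referenced table (just the referenced
+        # columns, typed as NULLTYPE) in the new MetaData so the copied
+        # FK constraint has a referent to resolve against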
+        spec = constraint.elements[0]._get_colspec()
+        parts = spec.split(".")
+        tname = parts[-2]
+        if len(parts) == 3:
+            referent_schema = parts[0]
+        else:
+            referent_schema = None
+
+        if tname != self.temp_table_name:
+            key = sql_schema._get_table_key(tname, referent_schema)
+
+            def colspec(elem: Any):
+                return elem._get_colspec()
+
+            if key in metadata.tables:
+                t = metadata.tables[key]
+                for elem in constraint.elements:
+                    colname = colspec(elem).split(".")[-1]
+                    if colname not in t.c:
+                        t.append_column(Column(colname, sqltypes.NULLTYPE))
+            else:
+                Table(
+                    tname,
+                    metadata,
+                    *[
+                        Column(n, sqltypes.NULLTYPE)
+                        for n in [
+                            colspec(elem).split(".")[-1]
+                            for elem in constraint.elements
+                        ]
+                    ],
+                    schema=referent_schema,
+                )
+
+    def _create(self, op_impl: DefaultImpl) -> None:
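+        # the "move and copy" sequence: create the temp table, copy rows
+        # across with INSERT..FROM SELECT, drop the original table, rename
+        # the temp table to the original name, then rebuild the indexes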
+        self._transfer_elements_to_new_table()
+
+        op_impl.prep_table_for_batch(self, self.table)
+        assert self.new_table is not None
+        op_impl.create_table(self.new_table)
+
+        try:
+            op_impl._exec(
+                _insert_inline(self.new_table).from_select(
+                    list(
+                        k
+                        for k, transfer in self.column_transfers.items()
+                        if "expr" in transfer
+                    ),
+                    _select(
+                        *[
+                            transfer["expr"]
+                            for transfer in self.column_transfers.values()
+                            if "expr" in transfer
+                        ]
+                    ),
+                )
+            )
+            op_impl.drop_table(self.table)
+        except:
+            op_impl.drop_table(self.new_table)
+            raise
+        else:
+            op_impl.rename_table(
+                self.temp_table_name, self.table.name, schema=self.table.schema
+            )
+            self.new_table.name = self.table.name
+            try:
+                for idx in self._gather_indexes_from_both_tables():
+                    op_impl.create_index(idx)
+            finally:
+                self.new_table.name = self.temp_table_name
+
+    def alter_column(
+        self,
+        table_name: str,
+        column_name: str,
+        nullable: Optional[bool] = None,
+        server_default: Optional[Union[Function[Any], str, bool]] = False,
+        name: Optional[str] = None,
+        type_: Optional[TypeEngine] = None,
+        autoincrement: Optional[Union[bool, Literal["auto"]]] = None,
+        comment: Union[str, Literal[False]] = False,
+        **kw,
+    ) -> None:
+        existing = self.columns[column_name]
+        existing_transfer: Dict[str, Any] = self.column_transfers[column_name]
+        if name is not None and name != column_name:
+            # note that we don't change '.key' - we keep referring
+            # to the renamed column by its old key in _create().  neat!
+            existing.name = name
+            existing_transfer["name"] = name
+
+            existing_type = kw.get("existing_type", None)
+            if existing_type:
+                resolved_existing_type = _resolve_for_variant(
+                    kw["existing_type"], self.impl.dialect
+                )
+
+                # pop named constraints for Boolean/Enum for rename
+                if (
+                    isinstance(resolved_existing_type, SchemaEventTarget)
+                    and resolved_existing_type.name  # type:ignore[attr-defined]  # noqa E501
+                ):
+                    self.named_constraints.pop(
+                        resolved_existing_type.name,  # type:ignore[attr-defined]  # noqa E501
+                        None,
+                    )
+
+        if type_ is not None:
+            type_ = sqltypes.to_instance(type_)
+            # the old type is being discarded, so turn off its eventing
+            # rules.  Alternatively we could erase the events set up by
+            # this type, but this is simpler.  We also ignore the
+            # drop_constraint that will come here from
+            # Operations.implementation_for(alter_column)
+
+            if isinstance(existing.type, SchemaEventTarget):
+                existing.type._create_events = (  # type:ignore[attr-defined]
+                    existing.type.create_constraint  # type:ignore[attr-defined] # noqa
+                ) = False
+
+            self.impl.cast_for_batch_migrate(
+                existing, existing_transfer, type_
+            )
+
+            existing.type = type_
+
+            # we *don't*, however, set events for the new type, because
+            # alter_column is invoked from
+            # Operations.implementation_for(alter_column), which will
+            # already emit an add_constraint()
+
+        if nullable is not None:
+            existing.nullable = nullable
+        if server_default is not False:
+            if server_default is None:
+                existing.server_default = None
+            else:
+                sql_schema.DefaultClause(
+                    server_default  # type: ignore[arg-type]
+                )._set_parent(existing)
+        if autoincrement is not None:
+            existing.autoincrement = bool(autoincrement)
+
+        if comment is not False:
+            existing.comment = comment
+
+    def _setup_dependencies_for_add_column(
+        self,
+        colname: str,
+        insert_before: Optional[str],
+        insert_after: Optional[str],
+    ) -> None:
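+        # record (before, after) ordering pairs for the new column; these
+        # feed the topological sort performed during the table rebuild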
+        index_cols = self.existing_ordering
+        col_indexes = {name: i for i, name in enumerate(index_cols)}
+
+        if not self.partial_reordering:
+            if insert_after:
+                if not insert_before:
+                    if insert_after in col_indexes:
+                        # insert after an existing column
+                        idx = col_indexes[insert_after] + 1
+                        if idx < len(index_cols):
+                            insert_before = index_cols[idx]
+                    else:
+                        # insert after a column that is also new
+                        insert_before = dict(self.add_col_ordering)[
+                            insert_after
+                        ]
+            if insert_before:
+                if not insert_after:
+                    if insert_before in col_indexes:
+                        # insert before an existing column
+                        idx = col_indexes[insert_before] - 1
+                        if idx >= 0:
+                            insert_after = index_cols[idx]
+                    else:
+                        # insert before a column that is also new
+                        insert_after = {
+                            b: a for a, b in self.add_col_ordering
+                        }[insert_before]
+
+        if insert_before:
+            self.add_col_ordering += ((colname, insert_before),)
+        if insert_after:
+            self.add_col_ordering += ((insert_after, colname),)
+
+        if (
+            not self.partial_reordering
+            and not insert_before
+            and not insert_after
+            and col_indexes
+        ):
+            self.add_col_ordering += ((index_cols[-1], colname),)
+
+    def add_column(
+        self,
+        table_name: str,
+        column: Column[Any],
+        insert_before: Optional[str] = None,
+        insert_after: Optional[str] = None,
+        **kw,
+    ) -> None:
+        self._setup_dependencies_for_add_column(
+            column.name, insert_before, insert_after
+        )
+        # we copy the column because operations.add_column()
+        # gives us a Column that is part of a Table already.
+        self.columns[column.name] = _copy(column, schema=self.table.schema)
+        self.column_transfers[column.name] = {}
+
+    def drop_column(
+        self,
+        table_name: str,
+        column: Union[ColumnClause[Any], Column[Any]],
+        **kw,
+    ) -> None:
+        if column.name in self.table.primary_key.columns:
+            _remove_column_from_collection(
+                self.table.primary_key.columns, column
+            )
+        del self.columns[column.name]
+        del self.column_transfers[column.name]
+        self.existing_ordering.remove(column.name)
+
+        # pop named constraints for Boolean/Enum for rename
+        if (
+            "existing_type" in kw
+            and isinstance(kw["existing_type"], SchemaEventTarget)
+            and kw["existing_type"].name  # type:ignore[attr-defined]
+        ):
+            self.named_constraints.pop(
+                kw["existing_type"].name, None  # type:ignore[attr-defined]
+            )
+
+    def create_column_comment(self, column):
+        """the batch table creation function will issue create_column_comment
+        on the real "impl" as part of the create table process.
+
+        That is, the Column object will have the comment on it already,
+        so when it is received by add_column() it will be a normal part of
+        the CREATE TABLE and doesn't need an extra step here.
+
+        """
+
+    def create_table_comment(self, table):
+        """the batch table creation function will issue create_table_comment
+        on the real "impl" as part of the create table process.
+
+        """
+
+    def drop_table_comment(self, table):
+        """the batch table creation function will issue drop_table_comment
+        on the real "impl" as part of the create table process.
+
+        """
+
+    def add_constraint(self, const: Constraint) -> None:
+        if not constraint_name_defined(const.name):
+            raise ValueError("Constraint must have a name")
+        if isinstance(const, sql_schema.PrimaryKeyConstraint):
+            if self.table.primary_key in self.unnamed_constraints:
+                self.unnamed_constraints.remove(self.table.primary_key)
+
+        if constraint_name_string(const.name):
+            self.named_constraints[const.name] = const
+        else:
+            self.unnamed_constraints.append(const)
+
+    def drop_constraint(self, const: Constraint) -> None:
+        if not const.name:
+            raise ValueError("Constraint must have a name")
+        try:
+            if const.name in self.col_named_constraints:
+                col, const = self.col_named_constraints.pop(const.name)
+
+                for col_const in list(self.columns[col.name].constraints):
+                    if col_const.name == const.name:
+                        self.columns[col.name].constraints.remove(col_const)
+            elif constraint_name_string(const.name):
+                const = self.named_constraints.pop(const.name)
+            elif const in self.unnamed_constraints:
+                self.unnamed_constraints.remove(const)
+
+        except KeyError:
+            if _is_type_bound(const):
+                # type-bound constraints are only included in the new
+                # table via their type object in any case, so ignore the
+                # drop_constraint() that comes here via the
+                # Operations.implementation_for(alter_column)
+                return
+            raise ValueError("No such constraint: '%s'" % const.name)
+        else:
+            if isinstance(const, PrimaryKeyConstraint):
+                for col in const.columns:
+                    self.columns[col.name].primary_key = False
+
+    def create_index(self, idx: Index) -> None:
+        self.new_indexes[idx.name] = idx  # type: ignore[index]
+
+    def drop_index(self, idx: Index) -> None:
+        try:
+            del self.indexes[idx.name]  # type: ignore[arg-type]
+        except KeyError:
+            raise ValueError("No such index: '%s'" % idx.name)
+
+    def rename_table(self, *arg, **kw):
+        raise NotImplementedError("TODO")
diff --git a/venv/Lib/site-packages/alembic/operations/ops.py b/venv/Lib/site-packages/alembic/operations/ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b65191cf20fa5bc1be08c646247dee611f4f4fe
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/operations/ops.py
@@ -0,0 +1,2786 @@
+from __future__ import annotations
+
+from abc import abstractmethod
+import re
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Dict
+from typing import FrozenSet
+from typing import Iterator
+from typing import List
+from typing import MutableMapping
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy.types import NULLTYPE
+
+from . import schemaobj
+from .base import BatchOperations
+from .base import Operations
+from .. import util
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import conv
+    from sqlalchemy.sql.elements import quoted_name
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.functions import Function
+    from sqlalchemy.sql.schema import CheckConstraint
+    from sqlalchemy.sql.schema import Column
+    from sqlalchemy.sql.schema import Computed
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import ForeignKeyConstraint
+    from sqlalchemy.sql.schema import Identity
+    from sqlalchemy.sql.schema import Index
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import PrimaryKeyConstraint
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.schema import UniqueConstraint
+    from sqlalchemy.sql.selectable import TableClause
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from ..autogenerate.rewriter import Rewriter
+    from ..runtime.migration import MigrationContext
+    from ..script.revision import _RevIdType
+
+_T = TypeVar("_T", bound=Any)
+_AC = TypeVar("_AC", bound="AddConstraintOp")
+
+
+class MigrateOperation:
+    """base class for migration command and organization objects.
+
+    This system is part of the operation extensibility API.
+
+    .. seealso::
+
+        :ref:`operation_objects`
+
+        :ref:`operation_plugins`
+
+        :ref:`customizing_revision`
+
+    """
+
+    @util.memoized_property
+    def info(self) -> Dict[Any, Any]:
+        """A dictionary that may be used to store arbitrary information
+        along with this :class:`.MigrateOperation` object.
+
+        """
+        return {}
+
+    _mutations: FrozenSet[Rewriter] = frozenset()
+
+    def reverse(self) -> MigrateOperation:
+        raise NotImplementedError
+
+    def to_diff_tuple(self) -> Tuple[Any, ...]:
+        raise NotImplementedError
+
+
+class AddConstraintOp(MigrateOperation):
+    """Represent an add constraint operation."""
+
+    add_constraint_ops = util.Dispatcher()
+
+    @property
+    def constraint_type(self) -> str:
+        raise NotImplementedError()
+
+    @classmethod
+    def register_add_constraint(
+        cls, type_: str
+    ) -> Callable[[Type[_AC]], Type[_AC]]:
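+        # class decorator: registers the decorated op class's
+        # from_constraint() factory under the given type string, which
+        # from_constraint() matches against constraint.__visit_name__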
+        def go(klass: Type[_AC]) -> Type[_AC]:
+            cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint)
+            return klass
+
+        return go
+
+    @classmethod
+    def from_constraint(cls, constraint: Constraint) -> AddConstraintOp:
+        return cls.add_constraint_ops.dispatch(constraint.__visit_name__)(  # type: ignore[no-any-return]  # noqa: E501
+            constraint
+        )
+
+    @abstractmethod
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Constraint:
+        pass
+
+    def reverse(self) -> DropConstraintOp:
+        return DropConstraintOp.from_constraint(self.to_constraint())
+
+    def to_diff_tuple(self) -> Tuple[str, Constraint]:
+        return ("add_constraint", self.to_constraint())
+
+
+@Operations.register_operation("drop_constraint")
+@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint")
+class DropConstraintOp(MigrateOperation):
+    """Represent a drop constraint operation."""
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        type_: Optional[str] = None,
+        *,
+        schema: Optional[str] = None,
+        _reverse: Optional[AddConstraintOp] = None,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.constraint_type = type_
+        self.schema = schema
+        self._reverse = _reverse
+
+    def reverse(self) -> AddConstraintOp:
+        return AddConstraintOp.from_constraint(self.to_constraint())
+
+    def to_diff_tuple(
+        self,
+    ) -> Tuple[str, SchemaItem]:
+        if self.constraint_type == "foreignkey":
+            return ("remove_fk", self.to_constraint())
+        else:
+            return ("remove_constraint", self.to_constraint())
+
+    @classmethod
+    def from_constraint(cls, constraint: Constraint) -> DropConstraintOp:
+        types = {
+            "unique_constraint": "unique",
+            "foreign_key_constraint": "foreignkey",
+            "primary_key_constraint": "primary",
+            "check_constraint": "check",
+            "column_check_constraint": "check",
+            "table_or_column_check_constraint": "check",
+        }
+
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+        return cls(
+            sqla_compat.constraint_name_or_none(constraint.name),
+            constraint_table.name,
+            schema=constraint_table.schema,
+            type_=types.get(constraint.__visit_name__),
+            _reverse=AddConstraintOp.from_constraint(constraint),
+        )
+
+    def to_constraint(self) -> Constraint:
+        if self._reverse is not None:
+            constraint = self._reverse.to_constraint()
+            constraint.name = self.constraint_name
+            constraint_table = sqla_compat._table_for_constraint(constraint)
+            constraint_table.name = self.table_name
+            constraint_table.schema = self.schema
+
+            return constraint
+        else:
+            raise ValueError(
+                "constraint cannot be produced; "
+                "original constraint is not present"
+            )
+
+    @classmethod
+    def drop_constraint(
+        cls,
+        operations: Operations,
+        constraint_name: str,
+        table_name: str,
+        type_: Optional[str] = None,
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
+
+        :param constraint_name: name of the constraint.
+        :param table_name: table name.
+        :param type\_: optional, required on MySQL.  Can be
+         'foreignkey', 'primary', 'unique', or 'check'.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
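+
+        e.g. (constraint and table names are illustrative)::
+
+            from alembic import op
+
+            op.drop_constraint(
+                "fk_user_address", "address", type_="foreignkey"
+            )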
+
+        """
+
+        op = cls(constraint_name, table_name, type_=type_, schema=schema)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_drop_constraint(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        type_: Optional[str] = None,
+    ) -> None:
+        """Issue a "drop constraint" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``table_name`` and ``schema``
+        arguments from the call.
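+
+        e.g. (names are illustrative)::
+
+            with op.batch_alter_table("user") as batch_op:
+                batch_op.drop_constraint("uq_user_name", type_="unique")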
+
+        .. seealso::
+
+            :meth:`.Operations.drop_constraint`
+
+        """
+        op = cls(
+            constraint_name,
+            operations.impl.table_name,
+            type_=type_,
+            schema=operations.impl.schema,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_primary_key")
+@BatchOperations.register_operation(
+    "create_primary_key", "batch_create_primary_key"
+)
+@AddConstraintOp.register_add_constraint("primary_key_constraint")
+class CreatePrimaryKeyOp(AddConstraintOp):
+    """Represent a create primary key operation."""
+
+    constraint_type = "primarykey"
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        columns: Sequence[str],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.columns = columns
+        self.schema = schema
+        self.kw = kw
+
+    @classmethod
+    def from_constraint(cls, constraint: Constraint) -> CreatePrimaryKeyOp:
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+        pk_constraint = cast("PrimaryKeyConstraint", constraint)
+        return cls(
+            sqla_compat.constraint_name_or_none(pk_constraint.name),
+            constraint_table.name,
+            pk_constraint.columns.keys(),
+            schema=constraint_table.schema,
+            **pk_constraint.dialect_kwargs,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> PrimaryKeyConstraint:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        return schema_obj.primary_key_constraint(
+            self.constraint_name,
+            self.table_name,
+            self.columns,
+            schema=self.schema,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_primary_key(
+        cls,
+        operations: Operations,
+        constraint_name: Optional[str],
+        table_name: str,
+        columns: List[str],
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Issue a "create primary key" instruction using the current
+        migration context.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
+
+        This internally generates a :class:`~sqlalchemy.schema.Table` object
+        containing the necessary columns, then generates a new
+        :class:`~sqlalchemy.schema.PrimaryKeyConstraint`
+        object which it then associates with the
+        :class:`~sqlalchemy.schema.Table`.
+        Any event listeners associated with this action will be fired
+        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+        construct is ultimately used to generate the ALTER statement.
+
+        :param constraint_name: Name of the primary key constraint.  The name
+         is necessary so that an ALTER statement can be emitted.  For setups
+         that use an automated naming scheme such as that described at
+         :ref:`sqla:constraint_naming_conventions`
+         ``constraint_name`` here can be ``None``, as the event listener will
+         apply the name to the constraint object when it is associated
+         with the table.
+        :param table_name: String name of the target table.
+        :param columns: a list of string column names to be applied to the
+         primary key constraint.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+        op = cls(constraint_name, table_name, columns, schema=schema)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_primary_key(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        columns: List[str],
+    ) -> None:
+        """Issue a "create primary key" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``table_name`` and ``schema``
+        arguments from the call.
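+
+        e.g. (names are illustrative)::
+
+            with op.batch_alter_table("user") as batch_op:
+                batch_op.create_primary_key("pk_user", ["id"])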
+
+        .. seealso::
+
+            :meth:`.Operations.create_primary_key`
+
+        """
+        op = cls(
+            constraint_name,
+            operations.impl.table_name,
+            columns,
+            schema=operations.impl.schema,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_unique_constraint")
+@BatchOperations.register_operation(
+    "create_unique_constraint", "batch_create_unique_constraint"
+)
+@AddConstraintOp.register_add_constraint("unique_constraint")
+class CreateUniqueConstraintOp(AddConstraintOp):
+    """Represent a create unique constraint operation."""
+
+    constraint_type = "unique"
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        columns: Sequence[str],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.columns = columns
+        self.schema = schema
+        self.kw = kw
+
+    @classmethod
+    def from_constraint(
+        cls, constraint: Constraint
+    ) -> CreateUniqueConstraintOp:
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+
+        uq_constraint = cast("UniqueConstraint", constraint)
+
+        kw: Dict[str, Any] = {}
+        if uq_constraint.deferrable:
+            kw["deferrable"] = uq_constraint.deferrable
+        if uq_constraint.initially:
+            kw["initially"] = uq_constraint.initially
+        kw.update(uq_constraint.dialect_kwargs)
+        return cls(
+            sqla_compat.constraint_name_or_none(uq_constraint.name),
+            constraint_table.name,
+            [c.name for c in uq_constraint.columns],
+            schema=constraint_table.schema,
+            **kw,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> UniqueConstraint:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        return schema_obj.unique_constraint(
+            self.constraint_name,
+            self.table_name,
+            self.columns,
+            schema=self.schema,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_unique_constraint(
+        cls,
+        operations: Operations,
+        constraint_name: Optional[str],
+        table_name: str,
+        columns: Sequence[str],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> Any:
+        """Issue a "create unique constraint" instruction using the
+        current migration context.
+
+        e.g.::
+
+            from alembic import op
+            op.create_unique_constraint("uq_user_name", "user", ["name"])
+
+        This internally generates a :class:`~sqlalchemy.schema.Table` object
+        containing the necessary columns, then generates a new
+        :class:`~sqlalchemy.schema.UniqueConstraint`
+        object which it then associates with the
+        :class:`~sqlalchemy.schema.Table`.
+        Any event listeners associated with this action will be fired
+        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+        construct is ultimately used to generate the ALTER statement.
+
+        :param constraint_name: Name of the unique constraint.  The name
+         is necessary so that an ALTER statement can be emitted.  For
+         setups that use an automated naming scheme such as that described
+         at :ref:`sqla:constraint_naming_conventions`,
+         ``constraint_name`` here can be ``None``, as the event listener
+         will apply the name to the constraint object when it is
+         associated with the table.
+        :param table_name: String name of the source table.
+        :param columns: a list of string column names in the
+         source table.
+        :param deferrable: optional bool. If set, emit DEFERRABLE or
+         NOT DEFERRABLE when issuing DDL for this constraint.
+        :param initially: optional string. If set, emit INITIALLY <value>
+         when issuing DDL for this constraint.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+
+        op = cls(constraint_name, table_name, columns, schema=schema, **kw)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_unique_constraint(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        columns: Sequence[str],
+        **kw: Any,
+    ) -> Any:
+        """Issue a "create unique constraint" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``table_name`` and ``schema``
+        arguments from the call.
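+
+        e.g. (names are illustrative)::
+
+            with op.batch_alter_table("user") as batch_op:
+                batch_op.create_unique_constraint(
+                    "uq_user_email", ["email"]
+                )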
+
+        .. seealso::
+
+            :meth:`.Operations.create_unique_constraint`
+
+        """
+        kw["schema"] = operations.impl.schema
+        op = cls(constraint_name, operations.impl.table_name, columns, **kw)
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_foreign_key")
+@BatchOperations.register_operation(
+    "create_foreign_key", "batch_create_foreign_key"
+)
+@AddConstraintOp.register_add_constraint("foreign_key_constraint")
+class CreateForeignKeyOp(AddConstraintOp):
+    """Represent a create foreign key constraint operation."""
+
+    constraint_type = "foreignkey"
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        source_table: str,
+        referent_table: str,
+        local_cols: List[str],
+        remote_cols: List[str],
+        **kw: Any,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.source_table = source_table
+        self.referent_table = referent_table
+        self.local_cols = local_cols
+        self.remote_cols = remote_cols
+        self.kw = kw
+
+    def to_diff_tuple(self) -> Tuple[str, ForeignKeyConstraint]:
+        return ("add_fk", self.to_constraint())
+
+    @classmethod
+    def from_constraint(cls, constraint: Constraint) -> CreateForeignKeyOp:
+        fk_constraint = cast("ForeignKeyConstraint", constraint)
+        kw: Dict[str, Any] = {}
+        if fk_constraint.onupdate:
+            kw["onupdate"] = fk_constraint.onupdate
+        if fk_constraint.ondelete:
+            kw["ondelete"] = fk_constraint.ondelete
+        if fk_constraint.initially:
+            kw["initially"] = fk_constraint.initially
+        if fk_constraint.deferrable:
+            kw["deferrable"] = fk_constraint.deferrable
+        if fk_constraint.use_alter:
+            kw["use_alter"] = fk_constraint.use_alter
+        if fk_constraint.match:
+            kw["match"] = fk_constraint.match
+
+        (
+            source_schema,
+            source_table,
+            source_columns,
+            target_schema,
+            target_table,
+            target_columns,
+            onupdate,
+            ondelete,
+            deferrable,
+            initially,
+        ) = sqla_compat._fk_spec(fk_constraint)
+
+        kw["source_schema"] = source_schema
+        kw["referent_schema"] = target_schema
+        kw.update(fk_constraint.dialect_kwargs)
+        return cls(
+            sqla_compat.constraint_name_or_none(fk_constraint.name),
+            source_table,
+            target_table,
+            source_columns,
+            target_columns,
+            **kw,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> ForeignKeyConstraint:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        return schema_obj.foreign_key_constraint(
+            self.constraint_name,
+            self.source_table,
+            self.referent_table,
+            self.local_cols,
+            self.remote_cols,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_foreign_key(
+        cls,
+        operations: Operations,
+        constraint_name: Optional[str],
+        source_table: str,
+        referent_table: str,
+        local_cols: List[str],
+        remote_cols: List[str],
+        *,
+        onupdate: Optional[str] = None,
+        ondelete: Optional[str] = None,
+        deferrable: Optional[bool] = None,
+        initially: Optional[str] = None,
+        match: Optional[str] = None,
+        source_schema: Optional[str] = None,
+        referent_schema: Optional[str] = None,
+        **dialect_kw: Any,
+    ) -> None:
+        """Issue a "create foreign key" instruction using the
+        current migration context.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_foreign_key(
+                "fk_user_address",
+                "address",
+                "user",
+                ["user_id"],
+                ["id"],
+            )
+
+        This internally generates a :class:`~sqlalchemy.schema.Table` object
+        containing the necessary columns, then generates a new
+        :class:`~sqlalchemy.schema.ForeignKeyConstraint`
+        object which it then associates with the
+        :class:`~sqlalchemy.schema.Table`.
+        Any event listeners associated with this action will be fired
+        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
+        construct is ultimately used to generate the ALTER statement.
+
+        :param constraint_name: Name of the foreign key constraint.  The name
+         is necessary so that an ALTER statement can be emitted.  For setups
+         that use an automated naming scheme such as that described at
+         :ref:`sqla:constraint_naming_conventions`,
+         ``constraint_name`` here can be ``None``, as the event listener will
+         apply the name to the constraint object when it is associated
+         with the table.
+        :param source_table: String name of the source table.
+        :param referent_table: String name of the destination table.
+        :param local_cols: a list of string column names in the
+         source table.
+        :param remote_cols: a list of string column names in the
+         remote table.
+        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
+         issuing DDL for this constraint. Typical values include CASCADE,
+         SET NULL and RESTRICT.
+        :param ondelete: Optional string. If set, emit ON DELETE <value> when
+         issuing DDL for this constraint. Typical values include CASCADE,
+         SET NULL and RESTRICT.
+        :param deferrable: optional bool. If set, emit DEFERRABLE or NOT
+         DEFERRABLE when issuing DDL for this constraint.
+        :param source_schema: Optional schema name of the source table.
+        :param referent_schema: Optional schema name of the destination table.
+
+        """
+
+        op = cls(
+            constraint_name,
+            source_table,
+            referent_table,
+            local_cols,
+            remote_cols,
+            onupdate=onupdate,
+            ondelete=ondelete,
+            deferrable=deferrable,
+            source_schema=source_schema,
+            referent_schema=referent_schema,
+            initially=initially,
+            match=match,
+            **dialect_kw,
+        )
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_foreign_key(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        referent_table: str,
+        local_cols: List[str],
+        remote_cols: List[str],
+        *,
+        referent_schema: Optional[str] = None,
+        onupdate: Optional[str] = None,
+        ondelete: Optional[str] = None,
+        deferrable: Optional[bool] = None,
+        initially: Optional[str] = None,
+        match: Optional[str] = None,
+        **dialect_kw: Any,
+    ) -> None:
+        """Issue a "create foreign key" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``source_table`` and
+        ``source_schema`` arguments from the call.
+
+        e.g.::
+
+            with op.batch_alter_table("address") as batch_op:
+                batch_op.create_foreign_key(
+                    "fk_user_address",
+                    "user",
+                    ["user_id"],
+                    ["id"],
+                )
+
+        .. seealso::
+
+            :meth:`.Operations.create_foreign_key`
+
+        """
+        op = cls(
+            constraint_name,
+            operations.impl.table_name,
+            referent_table,
+            local_cols,
+            remote_cols,
+            onupdate=onupdate,
+            ondelete=ondelete,
+            deferrable=deferrable,
+            source_schema=operations.impl.schema,
+            referent_schema=referent_schema,
+            initially=initially,
+            match=match,
+            **dialect_kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_check_constraint")
+@BatchOperations.register_operation(
+    "create_check_constraint", "batch_create_check_constraint"
+)
+@AddConstraintOp.register_add_constraint("check_constraint")
+@AddConstraintOp.register_add_constraint("table_or_column_check_constraint")
+@AddConstraintOp.register_add_constraint("column_check_constraint")
+class CreateCheckConstraintOp(AddConstraintOp):
+    """Represent a create check constraint operation."""
+
+    constraint_type = "check"
+
+    def __init__(
+        self,
+        constraint_name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        condition: Union[str, TextClause, ColumnElement[Any]],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        self.constraint_name = constraint_name
+        self.table_name = table_name
+        self.condition = condition
+        self.schema = schema
+        self.kw = kw
+
+    @classmethod
+    def from_constraint(
+        cls, constraint: Constraint
+    ) -> CreateCheckConstraintOp:
+        constraint_table = sqla_compat._table_for_constraint(constraint)
+
+        ck_constraint = cast("CheckConstraint", constraint)
+        return cls(
+            sqla_compat.constraint_name_or_none(ck_constraint.name),
+            constraint_table.name,
+            cast("ColumnElement[Any]", ck_constraint.sqltext),
+            schema=constraint_table.schema,
+            **ck_constraint.dialect_kwargs,
+        )
+
+    def to_constraint(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> CheckConstraint:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        return schema_obj.check_constraint(
+            self.constraint_name,
+            self.table_name,
+            self.condition,
+            schema=self.schema,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_check_constraint(
+        cls,
+        operations: Operations,
+        constraint_name: Optional[str],
+        table_name: str,
+        condition: Union[str, ColumnElement[bool], TextClause],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        """Issue a "create check constraint" instruction using the
+        current migration context.
+
+        e.g.::
+
+            from alembic import op
+            from sqlalchemy.sql import column, func
+
+            op.create_check_constraint(
+                "ck_user_name_len",
+                "user",
+                func.len(column("name")) > 5,
+            )
+
+        CHECK constraints are usually against a SQL expression, so ad-hoc
+        table metadata is typically needed.   The function will convert the given
+        arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
+        to an anonymous table in order to emit the CREATE statement.
+
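+        As an additional illustrative sketch, the condition may also be
+        given as a plain string (the table and constraint names here are
+        hypothetical)::
+
+            op.create_check_constraint(
+                "ck_account_positive_balance",
+                "account",
+                "balance >= 0",  # condition rendered as given in the DDL
+            )
+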
+        :param constraint_name: Name of the check constraint.  The name is
+         necessary so that an ALTER statement can be emitted.  For setups
+         that use an automated naming scheme such as that described at
+         :ref:`sqla:constraint_naming_conventions`,
+         ``name`` here can be ``None``, as the event listener will
+         apply the name to the constraint object when it is associated
+         with the table.
+        :param table_name: String name of the source table.
+        :param condition: SQL expression that's the condition of the
+         constraint. Can be a string or SQLAlchemy expression language
+         structure.
+        :param deferrable: optional bool. If set, emit DEFERRABLE or
+         NOT DEFERRABLE when issuing DDL for this constraint.
+        :param initially: optional string. If set, emit INITIALLY <value>
+         when issuing DDL for this constraint.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+        op = cls(constraint_name, table_name, condition, schema=schema, **kw)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_check_constraint(
+        cls,
+        operations: BatchOperations,
+        constraint_name: str,
+        condition: Union[str, ColumnElement[bool], TextClause],
+        **kw: Any,
+    ) -> None:
+        """Issue a "create check constraint" instruction using the
+        current batch migration context.
+
+        The batch form of this call omits the ``table_name`` and ``schema``
+        arguments from the call.
+
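+        e.g., a hypothetical sketch (table, constraint and column names are
+        illustrative only)::
+
+            with op.batch_alter_table("user") as batch_op:
+                batch_op.create_check_constraint(
+                    "ck_user_name_len",
+                    "length(name) > 5",  # condition given as a plain string
+                )
+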
+        .. seealso::
+
+            :meth:`.Operations.create_check_constraint`
+
+        """
+        op = cls(
+            constraint_name,
+            operations.impl.table_name,
+            condition,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_index")
+@BatchOperations.register_operation("create_index", "batch_create_index")
+class CreateIndexOp(MigrateOperation):
+    """Represent a create index operation."""
+
+    def __init__(
+        self,
+        index_name: Optional[str],
+        table_name: str,
+        columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
+        *,
+        schema: Optional[str] = None,
+        unique: bool = False,
+        if_not_exists: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        self.index_name = index_name
+        self.table_name = table_name
+        self.columns = columns
+        self.schema = schema
+        self.unique = unique
+        self.if_not_exists = if_not_exists
+        self.kw = kw
+
+    def reverse(self) -> DropIndexOp:
+        return DropIndexOp.from_index(self.to_index())
+
+    def to_diff_tuple(self) -> Tuple[str, Index]:
+        return ("add_index", self.to_index())
+
+    @classmethod
+    def from_index(cls, index: Index) -> CreateIndexOp:
+        assert index.table is not None
+        return cls(
+            index.name,
+            index.table.name,
+            index.expressions,
+            schema=index.table.schema,
+            unique=index.unique,
+            **index.kwargs,
+        )
+
+    def to_index(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Index:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        idx = schema_obj.index(
+            self.index_name,
+            self.table_name,
+            self.columns,
+            schema=self.schema,
+            unique=self.unique,
+            **self.kw,
+        )
+        return idx
+
+    @classmethod
+    def create_index(
+        cls,
+        operations: Operations,
+        index_name: Optional[str],
+        table_name: str,
+        columns: Sequence[Union[str, TextClause, Function[Any]]],
+        *,
+        schema: Optional[str] = None,
+        unique: bool = False,
+        if_not_exists: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        r"""Issue a "create index" instruction using the current
+        migration context.
+
+        e.g.::
+
+            from alembic import op
+
+            op.create_index("ik_test", "t1", ["foo", "bar"])
+
+        Functional indexes can be produced by using the
+        :func:`sqlalchemy.sql.expression.text` construct::
+
+            from alembic import op
+            from sqlalchemy import text
+
+            op.create_index("ik_test", "t1", [text("lower(foo)")])
+
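+        A unique index, optionally guarded with IF NOT EXISTS on backends
+        that support it (the index, table and column names here are
+        hypothetical)::
+
+            op.create_index(
+                "uq_user_email",
+                "user",
+                ["email"],
+                unique=True,
+                if_not_exists=True,  # requires a backend that supports it
+            )
+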
+        :param index_name: name of the index.
+        :param table_name: name of the owning table.
+        :param columns: a list consisting of string column names and/or
+         :func:`~sqlalchemy.sql.expression.text` constructs.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param unique: If True, create a unique index.
+
+        :param quote: Force quoting of this column's name on or off,
+         corresponding to ``True`` or ``False``. When left at its default
+         of ``None``, the column identifier will be quoted according to
+         whether the name is case sensitive (identifiers with at least one
+         upper case character are treated as case sensitive), or if it's a
+         reserved word. This flag is only needed to force quoting of a
+         reserved word which is not known by the SQLAlchemy dialect.
+
+        :param if_not_exists: If True, adds IF NOT EXISTS operator when
+         creating the new index.
+
+         .. versionadded:: 1.12.0
+
+        :param \**kw: Additional keyword arguments not mentioned above are
+         dialect specific, and passed in the form
+         ``<dialectname>_<argname>``.
+         See the documentation regarding an individual dialect at
+         :ref:`dialect_toplevel` for detail on documented arguments.
+
+        """
+        op = cls(
+            index_name,
+            table_name,
+            columns,
+            schema=schema,
+            unique=unique,
+            if_not_exists=if_not_exists,
+            **kw,
+        )
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_index(
+        cls,
+        operations: BatchOperations,
+        index_name: str,
+        columns: List[str],
+        **kw: Any,
+    ) -> None:
+        """Issue a "create index" instruction using the
+        current batch migration context.
+
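+        e.g., a hypothetical sketch (index, table and column names are
+        illustrative only)::
+
+            with op.batch_alter_table("t1") as batch_op:
+                batch_op.create_index("ik_test", ["foo", "bar"])
+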
+        .. seealso::
+
+            :meth:`.Operations.create_index`
+
+        """
+
+        op = cls(
+            index_name,
+            operations.impl.table_name,
+            columns,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("drop_index")
+@BatchOperations.register_operation("drop_index", "batch_drop_index")
+class DropIndexOp(MigrateOperation):
+    """Represent a drop index operation."""
+
+    def __init__(
+        self,
+        index_name: Union[quoted_name, str, conv],
+        table_name: Optional[str] = None,
+        *,
+        schema: Optional[str] = None,
+        if_exists: Optional[bool] = None,
+        _reverse: Optional[CreateIndexOp] = None,
+        **kw: Any,
+    ) -> None:
+        self.index_name = index_name
+        self.table_name = table_name
+        self.schema = schema
+        self.if_exists = if_exists
+        self._reverse = _reverse
+        self.kw = kw
+
+    def to_diff_tuple(self) -> Tuple[str, Index]:
+        return ("remove_index", self.to_index())
+
+    def reverse(self) -> CreateIndexOp:
+        return CreateIndexOp.from_index(self.to_index())
+
+    @classmethod
+    def from_index(cls, index: Index) -> DropIndexOp:
+        assert index.table is not None
+        return cls(
+            index.name,  # type: ignore[arg-type]
+            table_name=index.table.name,
+            schema=index.table.schema,
+            _reverse=CreateIndexOp.from_index(index),
+            unique=index.unique,
+            **index.kwargs,
+        )
+
+    def to_index(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Index:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        # need a dummy column name here since SQLAlchemy
+        # 0.7.6 and further raises on Index with no columns
+        return schema_obj.index(
+            self.index_name,
+            self.table_name,
+            self._reverse.columns if self._reverse else ["x"],
+            schema=self.schema,
+            **self.kw,
+        )
+
+    @classmethod
+    def drop_index(
+        cls,
+        operations: Operations,
+        index_name: str,
+        table_name: Optional[str] = None,
+        *,
+        schema: Optional[str] = None,
+        if_exists: Optional[bool] = None,
+        **kw: Any,
+    ) -> None:
+        r"""Issue a "drop index" instruction using the current
+        migration context.
+
+        e.g.::
+
+            op.drop_index("accounts")
+
+        :param index_name: name of the index.
+        :param table_name: name of the owning table.  Some
+         backends such as Microsoft SQL Server require this.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        :param if_exists: If True, adds IF EXISTS operator when
+         dropping the index.
+
+         .. versionadded:: 1.12.0
+
+        :param \**kw: Additional keyword arguments not mentioned above are
+         dialect specific, and passed in the form
+         ``<dialectname>_<argname>``.
+         See the documentation regarding an individual dialect at
+         :ref:`dialect_toplevel` for detail on documented arguments.
+
+        """
+        op = cls(
+            index_name,
+            table_name=table_name,
+            schema=schema,
+            if_exists=if_exists,
+            **kw,
+        )
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_drop_index(
+        cls, operations: BatchOperations, index_name: str, **kw: Any
+    ) -> None:
+        """Issue a "drop index" instruction using the
+        current batch migration context.
+
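+        e.g., a hypothetical sketch (index and table names are illustrative
+        only)::
+
+            with op.batch_alter_table("t1") as batch_op:
+                batch_op.drop_index("ik_test")
+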
+        .. seealso::
+
+            :meth:`.Operations.drop_index`
+
+        """
+
+        op = cls(
+            index_name,
+            table_name=operations.impl.table_name,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_table")
+class CreateTableOp(MigrateOperation):
+    """Represent a create table operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        columns: Sequence[SchemaItem],
+        *,
+        schema: Optional[str] = None,
+        _namespace_metadata: Optional[MetaData] = None,
+        _constraints_included: bool = False,
+        **kw: Any,
+    ) -> None:
+        self.table_name = table_name
+        self.columns = columns
+        self.schema = schema
+        self.info = kw.pop("info", {})
+        self.comment = kw.pop("comment", None)
+        self.prefixes = kw.pop("prefixes", None)
+        self.kw = kw
+        self._namespace_metadata = _namespace_metadata
+        self._constraints_included = _constraints_included
+
+    def reverse(self) -> DropTableOp:
+        return DropTableOp.from_table(
+            self.to_table(), _namespace_metadata=self._namespace_metadata
+        )
+
+    def to_diff_tuple(self) -> Tuple[str, Table]:
+        return ("add_table", self.to_table())
+
+    @classmethod
+    def from_table(
+        cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None
+    ) -> CreateTableOp:
+        if _namespace_metadata is None:
+            _namespace_metadata = table.metadata
+
+        return cls(
+            table.name,
+            list(table.c) + list(table.constraints),
+            schema=table.schema,
+            _namespace_metadata=_namespace_metadata,
+            # given a Table() object, this Table will contain full Index()
+            # and UniqueConstraint objects already constructed in response to
+            # each unique=True / index=True flag on a Column.  Carry this
+            # state along so that when we re-convert back into a Table, we
+            # skip unique=True/index=True so that these constraints are
+            # not doubled up. see #844 #848
+            _constraints_included=True,
+            comment=table.comment,
+            info=dict(table.info),
+            prefixes=list(table._prefixes),
+            **table.kwargs,
+        )
+
+    def to_table(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Table:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        return schema_obj.table(
+            self.table_name,
+            *self.columns,
+            schema=self.schema,
+            prefixes=list(self.prefixes) if self.prefixes else [],
+            comment=self.comment,
+            info=self.info.copy() if self.info else {},
+            _constraints_included=self._constraints_included,
+            **self.kw,
+        )
+
+    @classmethod
+    def create_table(
+        cls,
+        operations: Operations,
+        table_name: str,
+        *columns: SchemaItem,
+        **kw: Any,
+    ) -> Table:
+        r"""Issue a "create table" instruction using the current migration
+        context.
+
+        This directive receives an argument list similar to that of the
+        traditional :class:`sqlalchemy.schema.Table` construct, but without the
+        metadata::
+
+            from sqlalchemy import Column, func
+            from sqlalchemy import INTEGER, NVARCHAR, TIMESTAMP, VARCHAR
+            from alembic import op
+
+            op.create_table(
+                "account",
+                Column("id", INTEGER, primary_key=True),
+                Column("name", VARCHAR(50), nullable=False),
+                Column("description", NVARCHAR(200)),
+                Column("timestamp", TIMESTAMP, server_default=func.now()),
+            )
+
+        Note that :meth:`.create_table` accepts
+        :class:`~sqlalchemy.schema.Column`
+        constructs directly from the SQLAlchemy library.  In particular,
+        default values to be created on the database side are
+        specified using the ``server_default`` parameter, and not
+        ``default`` which only specifies Python-side defaults::
+
+            from alembic import op
+            from sqlalchemy import Column, INTEGER, TIMESTAMP, func
+
+            # specify "DEFAULT NOW" along with the "timestamp" column
+            op.create_table(
+                "account",
+                Column("id", INTEGER, primary_key=True),
+                Column("timestamp", TIMESTAMP, server_default=func.now()),
+            )
+
+        The function also returns a newly created
+        :class:`~sqlalchemy.schema.Table` object, corresponding to the table
+        specification given, which is suitable for
+        immediate SQL operations, in particular
+        :meth:`.Operations.bulk_insert`::
+
+            from sqlalchemy import Column, func
+            from sqlalchemy import INTEGER, NVARCHAR, TIMESTAMP, VARCHAR
+            from alembic import op
+
+            account_table = op.create_table(
+                "account",
+                Column("id", INTEGER, primary_key=True),
+                Column("name", VARCHAR(50), nullable=False),
+                Column("description", NVARCHAR(200)),
+                Column("timestamp", TIMESTAMP, server_default=func.now()),
+            )
+
+            op.bulk_insert(
+                account_table,
+                [
+                    {"name": "A1", "description": "account 1"},
+                    {"name": "A2", "description": "account 2"},
+                ],
+            )
+
+        :param table_name: Name of the table
+        :param \*columns: collection of :class:`~sqlalchemy.schema.Column`
+         objects within
+         the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
+         objects
+         and :class:`~sqlalchemy.schema.Index` objects.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param \**kw: Other keyword arguments are passed to the underlying
+         :class:`sqlalchemy.schema.Table` object created for the command.
+
+        :return: the :class:`~sqlalchemy.schema.Table` object corresponding
+         to the parameters given.
+
+        """
+        op = cls(table_name, columns, **kw)
+        return operations.invoke(op)
+
+
+@Operations.register_operation("drop_table")
+class DropTableOp(MigrateOperation):
+    """Represent a drop table operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        *,
+        schema: Optional[str] = None,
+        table_kw: Optional[MutableMapping[Any, Any]] = None,
+        _reverse: Optional[CreateTableOp] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.schema = schema
+        self.table_kw = table_kw or {}
+        self.comment = self.table_kw.pop("comment", None)
+        self.info = self.table_kw.pop("info", None)
+        self.prefixes = self.table_kw.pop("prefixes", None)
+        self._reverse = _reverse
+
+    def to_diff_tuple(self) -> Tuple[str, Table]:
+        return ("remove_table", self.to_table())
+
+    def reverse(self) -> CreateTableOp:
+        return CreateTableOp.from_table(self.to_table())
+
+    @classmethod
+    def from_table(
+        cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None
+    ) -> DropTableOp:
+        return cls(
+            table.name,
+            schema=table.schema,
+            table_kw={
+                "comment": table.comment,
+                "info": dict(table.info),
+                "prefixes": list(table._prefixes),
+                **table.kwargs,
+            },
+            _reverse=CreateTableOp.from_table(
+                table, _namespace_metadata=_namespace_metadata
+            ),
+        )
+
+    def to_table(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Table:
+        if self._reverse:
+            cols_and_constraints = self._reverse.columns
+        else:
+            cols_and_constraints = []
+
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        t = schema_obj.table(
+            self.table_name,
+            *cols_and_constraints,
+            comment=self.comment,
+            info=self.info.copy() if self.info else {},
+            prefixes=list(self.prefixes) if self.prefixes else [],
+            schema=self.schema,
+            _constraints_included=self._reverse._constraints_included
+            if self._reverse
+            else False,
+            **self.table_kw,
+        )
+        return t
+
+    @classmethod
+    def drop_table(
+        cls,
+        operations: Operations,
+        table_name: str,
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        r"""Issue a "drop table" instruction using the current
+        migration context.
+
+        e.g.::
+
+            op.drop_table("accounts")
+
+        :param table_name: Name of the table
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param \**kw: Other keyword arguments are passed to the underlying
+         :class:`sqlalchemy.schema.Table` object created for the command.
+
+        """
+        op = cls(table_name, schema=schema, table_kw=kw)
+        operations.invoke(op)
+
+
+class AlterTableOp(MigrateOperation):
+    """Represent an alter table operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.schema = schema
+
+
+@Operations.register_operation("rename_table")
+class RenameTableOp(AlterTableOp):
+    """Represent a rename table operation."""
+
+    def __init__(
+        self,
+        old_table_name: str,
+        new_table_name: str,
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        super().__init__(old_table_name, schema=schema)
+        self.new_table_name = new_table_name
+
+    @classmethod
+    def rename_table(
+        cls,
+        operations: Operations,
+        old_table_name: str,
+        new_table_name: str,
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Emit an ALTER TABLE to rename a table.
+
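+        e.g., with hypothetical table names::
+
+            op.rename_table("account_old", "account")  # illustrative names
+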
+        :param old_table_name: old name.
+        :param new_table_name: new name.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+        op = cls(old_table_name, new_table_name, schema=schema)
+        return operations.invoke(op)
+
+
+@Operations.register_operation("create_table_comment")
+@BatchOperations.register_operation(
+    "create_table_comment", "batch_create_table_comment"
+)
+class CreateTableCommentOp(AlterTableOp):
+    """Represent a COMMENT ON `table` operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        comment: Optional[str],
+        *,
+        schema: Optional[str] = None,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.comment = comment
+        self.existing_comment = existing_comment
+        self.schema = schema
+
+    @classmethod
+    def create_table_comment(
+        cls,
+        operations: Operations,
+        table_name: str,
+        comment: Optional[str],
+        *,
+        existing_comment: Optional[str] = None,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Emit a COMMENT ON operation to set the comment for a table.
+
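+        e.g., a hypothetical usage (the table name and comment text are
+        illustrative only)::
+
+            op.create_table_comment(
+                "account",
+                "user accounts and login metadata",
+            )
+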
+        :param table_name: string name of the target table.
+        :param comment: string value of the comment being registered against
+         the specified table.
+        :param existing_comment: String value of a comment
+         already registered on the specified table, used within autogenerate
+         so that the operation is reversible, but not required for direct
+         use.
+
+        .. seealso::
+
+            :meth:`.Operations.drop_table_comment`
+
+            :paramref:`.Operations.alter_column.comment`
+
+        """
+
+        op = cls(
+            table_name,
+            comment,
+            existing_comment=existing_comment,
+            schema=schema,
+        )
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_create_table_comment(
+        cls,
+        operations: BatchOperations,
+        comment: Optional[str],
+        *,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        """Emit a COMMENT ON operation to set the comment for a table
+        using the current batch migration context.
+
+        :param comment: string value of the comment being registered against
+         the specified table.
+        :param existing_comment: String value of a comment
+         already registered on the specified table, used within autogenerate
+         so that the operation is reversible, but not required for direct
+         use.
+
+        """
+
+        op = cls(
+            operations.impl.table_name,
+            comment,
+            existing_comment=existing_comment,
+            schema=operations.impl.schema,
+        )
+        return operations.invoke(op)
+
+    def reverse(self) -> Union[CreateTableCommentOp, DropTableCommentOp]:
+        """Reverses the COMMENT ON operation against a table."""
+        if self.existing_comment is None:
+            return DropTableCommentOp(
+                self.table_name,
+                existing_comment=self.comment,
+                schema=self.schema,
+            )
+        else:
+            return CreateTableCommentOp(
+                self.table_name,
+                self.existing_comment,
+                existing_comment=self.comment,
+                schema=self.schema,
+            )
+
+    def to_table(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Table:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        return schema_obj.table(
+            self.table_name, schema=self.schema, comment=self.comment
+        )
+
+    def to_diff_tuple(self) -> Tuple[Any, ...]:
+        return ("add_table_comment", self.to_table(), self.existing_comment)
+
+
+@Operations.register_operation("drop_table_comment")
+@BatchOperations.register_operation(
+    "drop_table_comment", "batch_drop_table_comment"
+)
+class DropTableCommentOp(AlterTableOp):
+    """Represent an operation to remove the comment from a table."""
+
+    def __init__(
+        self,
+        table_name: str,
+        *,
+        schema: Optional[str] = None,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        self.table_name = table_name
+        self.existing_comment = existing_comment
+        self.schema = schema
+
+    @classmethod
+    def drop_table_comment(
+        cls,
+        operations: Operations,
+        table_name: str,
+        *,
+        existing_comment: Optional[str] = None,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Issue a "drop table comment" operation to
+        remove an existing comment set on a table.
+
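+        e.g., with a hypothetical table name::
+
+            op.drop_table_comment("account")  # illustrative name
+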
+        :param table_name: string name of the target table.
+        :param existing_comment: An optional string value of a comment already
+         registered on the specified table.
+
+        .. seealso::
+
+            :meth:`.Operations.create_table_comment`
+
+            :paramref:`.Operations.alter_column.comment`
+
+        """
+
+        op = cls(table_name, existing_comment=existing_comment, schema=schema)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_drop_table_comment(
+        cls,
+        operations: BatchOperations,
+        *,
+        existing_comment: Optional[str] = None,
+    ) -> None:
+        """Issue a "drop table comment" operation to
+        remove an existing comment set on a table using the current
+        batch operations context.
+
+        :param existing_comment: An optional string value of a comment already
+         registered on the specified table.
+
+        """
+
+        op = cls(
+            operations.impl.table_name,
+            existing_comment=existing_comment,
+            schema=operations.impl.schema,
+        )
+        return operations.invoke(op)
+
+    def reverse(self) -> CreateTableCommentOp:
+        """Reverses the COMMENT ON operation against a table."""
+        return CreateTableCommentOp(
+            self.table_name, self.existing_comment, schema=self.schema
+        )
+
+    def to_table(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Table:
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+
+        return schema_obj.table(self.table_name, schema=self.schema)
+
+    def to_diff_tuple(self) -> Tuple[Any, ...]:
+        return ("remove_table_comment", self.to_table())
+
+
+@Operations.register_operation("alter_column")
+@BatchOperations.register_operation("alter_column", "batch_alter_column")
+class AlterColumnOp(AlterTableOp):
+    """Represent an alter column operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        column_name: str,
+        *,
+        schema: Optional[str] = None,
+        existing_type: Optional[Any] = None,
+        existing_server_default: Any = False,
+        existing_nullable: Optional[bool] = None,
+        existing_comment: Optional[str] = None,
+        modify_nullable: Optional[bool] = None,
+        modify_comment: Optional[Union[str, Literal[False]]] = False,
+        modify_server_default: Any = False,
+        modify_name: Optional[str] = None,
+        modify_type: Optional[Any] = None,
+        **kw: Any,
+    ) -> None:
+        super().__init__(table_name, schema=schema)
+        self.column_name = column_name
+        self.existing_type = existing_type
+        self.existing_server_default = existing_server_default
+        self.existing_nullable = existing_nullable
+        self.existing_comment = existing_comment
+        self.modify_nullable = modify_nullable
+        self.modify_comment = modify_comment
+        self.modify_server_default = modify_server_default
+        self.modify_name = modify_name
+        self.modify_type = modify_type
+        self.kw = kw
+
+    def to_diff_tuple(self) -> Any:
+        col_diff = []
+        schema, tname, cname = self.schema, self.table_name, self.column_name
+
+        if self.modify_type is not None:
+            col_diff.append(
+                (
+                    "modify_type",
+                    schema,
+                    tname,
+                    cname,
+                    {
+                        "existing_nullable": self.existing_nullable,
+                        "existing_server_default": (
+                            self.existing_server_default
+                        ),
+                        "existing_comment": self.existing_comment,
+                    },
+                    self.existing_type,
+                    self.modify_type,
+                )
+            )
+
+        if self.modify_nullable is not None:
+            col_diff.append(
+                (
+                    "modify_nullable",
+                    schema,
+                    tname,
+                    cname,
+                    {
+                        "existing_type": self.existing_type,
+                        "existing_server_default": (
+                            self.existing_server_default
+                        ),
+                        "existing_comment": self.existing_comment,
+                    },
+                    self.existing_nullable,
+                    self.modify_nullable,
+                )
+            )
+
+        if self.modify_server_default is not False:
+            col_diff.append(
+                (
+                    "modify_default",
+                    schema,
+                    tname,
+                    cname,
+                    {
+                        "existing_nullable": self.existing_nullable,
+                        "existing_type": self.existing_type,
+                        "existing_comment": self.existing_comment,
+                    },
+                    self.existing_server_default,
+                    self.modify_server_default,
+                )
+            )
+
+        if self.modify_comment is not False:
+            col_diff.append(
+                (
+                    "modify_comment",
+                    schema,
+                    tname,
+                    cname,
+                    {
+                        "existing_nullable": self.existing_nullable,
+                        "existing_type": self.existing_type,
+                        "existing_server_default": (
+                            self.existing_server_default
+                        ),
+                    },
+                    self.existing_comment,
+                    self.modify_comment,
+                )
+            )
+
+        return col_diff
+
+    def has_changes(self) -> bool:
+        hc1 = (
+            self.modify_nullable is not None
+            or self.modify_server_default is not False
+            or self.modify_type is not None
+            or self.modify_comment is not False
+        )
+        if hc1:
+            return True
+        # any remaining "modify_*" entries passed through **kw also
+        # constitute a change
+        return any(kw.startswith("modify_") for kw in self.kw)
+
+    def reverse(self) -> AlterColumnOp:
+        kw = self.kw.copy()
+        kw["existing_type"] = self.existing_type
+        kw["existing_nullable"] = self.existing_nullable
+        kw["existing_server_default"] = self.existing_server_default
+        kw["existing_comment"] = self.existing_comment
+        if self.modify_type is not None:
+            kw["modify_type"] = self.modify_type
+        if self.modify_nullable is not None:
+            kw["modify_nullable"] = self.modify_nullable
+        if self.modify_server_default is not False:
+            kw["modify_server_default"] = self.modify_server_default
+        if self.modify_comment is not False:
+            kw["modify_comment"] = self.modify_comment
+
+        # TODO: make this a little simpler
+        all_keys = {
+            m.group(1)
+            for m in [re.match(r"^(?:existing_|modify_)(.+)$", k) for k in kw]
+            if m
+        }
+
+        for k in all_keys:
+            if "modify_%s" % k in kw:
+                swap = kw["existing_%s" % k]
+                kw["existing_%s" % k] = kw["modify_%s" % k]
+                kw["modify_%s" % k] = swap
+
+        return self.__class__(
+            self.table_name, self.column_name, schema=self.schema, **kw
+        )
+
+    @classmethod
+    def alter_column(
+        cls,
+        operations: Operations,
+        table_name: str,
+        column_name: str,
+        *,
+        nullable: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+        server_default: Any = False,
+        new_column_name: Optional[str] = None,
+        type_: Optional[Union[TypeEngine[Any], Type[TypeEngine[Any]]]] = None,
+        existing_type: Optional[
+            Union[TypeEngine[Any], Type[TypeEngine[Any]]]
+        ] = None,
+        existing_server_default: Optional[
+            Union[str, bool, Identity, Computed]
+        ] = False,
+        existing_nullable: Optional[bool] = None,
+        existing_comment: Optional[str] = None,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        r"""Issue an "alter column" instruction using the
+        current migration context.
+
+        Generally, only that aspect of the column which
+        is being changed, i.e. name, type, nullability,
+        default, needs to be specified.  Multiple changes
+        can also be specified at once and the backend should
+        "do the right thing", emitting each change either
+        separately or together as the backend allows.
+
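+        e.g., a hypothetical sketch that tightens nullability (the table,
+        column and assumed existing type are illustrative only)::
+
+            from alembic import op
+            from sqlalchemy import String
+
+            op.alter_column(
+                "user",
+                "name",
+                existing_type=String(50),  # assumed prior type
+                nullable=False,
+            )
+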
+        MySQL has special requirements here, since MySQL
+        cannot ALTER a column without a full specification.
+        When producing MySQL-compatible migration files,
+        it is recommended that the ``existing_type``,
+        ``existing_server_default``, and ``existing_nullable``
+        parameters be present, if not being altered.
+
+        Type changes which are against the SQLAlchemy
+        "schema" types :class:`~sqlalchemy.types.Boolean`
+        and  :class:`~sqlalchemy.types.Enum` may also
+        add or drop constraints which accompany those
+        types on backends that don't support them natively.
+        The ``existing_type`` argument is
+        used in this case to identify and remove a previous
+        constraint that was bound to the type object.
+
+        :param table_name: string name of the target table.
+        :param column_name: string name of the target column,
+         as it exists before the operation begins.
+        :param nullable: Optional; specify ``True`` or ``False``
+         to alter the column's nullability.
+        :param server_default: Optional; specify a string
+         SQL expression, :func:`~sqlalchemy.sql.expression.text`,
+         or :class:`~sqlalchemy.schema.DefaultClause` to indicate
+         an alteration to the column's default value.
+         Set to ``None`` to have the default removed.
+        :param comment: optional string text of a new comment to add to the
+         column.
+        :param new_column_name: Optional; specify a string name here to
+         indicate the new name within a column rename operation.
+        :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
+         type object to specify a change to the column's type.
+         For SQLAlchemy types that also indicate a constraint (i.e.
+         :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
+         the constraint is also generated.
+        :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
+         currently understood by the MySQL dialect.
+        :param existing_type: Optional; a
+         :class:`~sqlalchemy.types.TypeEngine`
+         type object to specify the previous type.   This
+         is required for all MySQL column alter operations that
+         don't otherwise specify a new type, as well as for
+         when nullability is being changed on a SQL Server
+         column.  It is also used if the type is a so-called
+         SQLAlchemy "schema" type which may define a constraint (i.e.
+         :class:`~sqlalchemy.types.Boolean`,
+         :class:`~sqlalchemy.types.Enum`),
+         so that the constraint can be dropped.
+        :param existing_server_default: Optional; The existing
+         default value of the column.   Required on MySQL if
+         an existing default is not being changed; else MySQL
+         removes the default.
+        :param existing_nullable: Optional; the existing nullability
+         of the column.  Required on MySQL if the existing nullability
+         is not being changed; else MySQL sets this to NULL.
+        :param existing_autoincrement: Optional; the existing autoincrement
+         of the column.  Used for MySQL's system of altering a column
+         that specifies ``AUTO_INCREMENT``.
+        :param existing_comment: string text of the existing comment on the
+         column to be maintained.  Required on MySQL if the existing comment
+         on the column is not being changed.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param postgresql_using: String argument which will indicate a
+         SQL expression to render within the Postgresql-specific USING clause
+         within ALTER COLUMN.    This string is taken directly as raw SQL which
+         must explicitly include any necessary quoting or escaping of tokens
+         within the expression.
+
+        """
+
+        alt = cls(
+            table_name,
+            column_name,
+            schema=schema,
+            existing_type=existing_type,
+            existing_server_default=existing_server_default,
+            existing_nullable=existing_nullable,
+            existing_comment=existing_comment,
+            modify_name=new_column_name,
+            modify_type=type_,
+            modify_server_default=server_default,
+            modify_nullable=nullable,
+            modify_comment=comment,
+            **kw,
+        )
+
+        return operations.invoke(alt)
+
+    @classmethod
+    def batch_alter_column(
+        cls,
+        operations: BatchOperations,
+        column_name: str,
+        *,
+        nullable: Optional[bool] = None,
+        comment: Optional[Union[str, Literal[False]]] = False,
+        server_default: Any = False,
+        new_column_name: Optional[str] = None,
+        type_: Optional[Union[TypeEngine[Any], Type[TypeEngine[Any]]]] = None,
+        existing_type: Optional[
+            Union[TypeEngine[Any], Type[TypeEngine[Any]]]
+        ] = None,
+        existing_server_default: Optional[
+            Union[str, bool, Identity, Computed]
+        ] = False,
+        existing_nullable: Optional[bool] = None,
+        existing_comment: Optional[str] = None,
+        insert_before: Optional[str] = None,
+        insert_after: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        """Issue an "alter column" instruction using the current
+        batch migration context.
+
+        Parameters are the same as that of :meth:`.Operations.alter_column`,
+        as well as the following option(s):
+
+        :param insert_before: String name of an existing column which this
+         column should be placed before, when creating the new table.
+
+        :param insert_after: String name of an existing column which this
+         column should be placed after, when creating the new table.  If
+         both :paramref:`.BatchOperations.alter_column.insert_before`
+         and :paramref:`.BatchOperations.alter_column.insert_after` are
+         omitted, the column is inserted after the last existing column
+         in the table.
+
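+        e.g., a hypothetical sketch (the table, column and assumed existing
+        type are illustrative only)::
+
+            from sqlalchemy import String
+
+            with op.batch_alter_table("user") as batch_op:
+                batch_op.alter_column(
+                    "name",
+                    existing_type=String(50),  # assumed prior type
+                    nullable=False,
+                )
+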
+        .. seealso::
+
+            :meth:`.Operations.alter_column`
+
+
+        """
+        alt = cls(
+            operations.impl.table_name,
+            column_name,
+            schema=operations.impl.schema,
+            existing_type=existing_type,
+            existing_server_default=existing_server_default,
+            existing_nullable=existing_nullable,
+            existing_comment=existing_comment,
+            modify_name=new_column_name,
+            modify_type=type_,
+            modify_server_default=server_default,
+            modify_nullable=nullable,
+            modify_comment=comment,
+            insert_before=insert_before,
+            insert_after=insert_after,
+            **kw,
+        )
+
+        return operations.invoke(alt)
+
+
+@Operations.register_operation("add_column")
+@BatchOperations.register_operation("add_column", "batch_add_column")
+class AddColumnOp(AlterTableOp):
+    """Represent an add column operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        column: Column[Any],
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        super().__init__(table_name, schema=schema)
+        self.column = column
+        self.kw = kw
+
+    def reverse(self) -> DropColumnOp:
+        return DropColumnOp.from_column_and_tablename(
+            self.schema, self.table_name, self.column
+        )
+
+    def to_diff_tuple(
+        self,
+    ) -> Tuple[str, Optional[str], str, Column[Any]]:
+        return ("add_column", self.schema, self.table_name, self.column)
+
+    def to_column(self) -> Column[Any]:
+        return self.column
+
+    @classmethod
+    def from_column(cls, col: Column[Any]) -> AddColumnOp:
+        return cls(col.table.name, col, schema=col.table.schema)
+
+    @classmethod
+    def from_column_and_tablename(
+        cls,
+        schema: Optional[str],
+        tname: str,
+        col: Column[Any],
+    ) -> AddColumnOp:
+        return cls(tname, col, schema=schema)
+
+    @classmethod
+    def add_column(
+        cls,
+        operations: Operations,
+        table_name: str,
+        column: Column[Any],
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        """Issue an "add column" instruction using the current
+        migration context.
+
+        e.g.::
+
+            from alembic import op
+            from sqlalchemy import Column, String
+
+            op.add_column("organization", Column("name", String()))
+
+        The :meth:`.Operations.add_column` method typically corresponds
+        to the SQL command "ALTER TABLE... ADD COLUMN".    Within the scope
+        of this command, the column's name, datatype, nullability,
+        and optional server-generated defaults may be indicated.
+
+        .. note::
+
+            With the exception of NOT NULL constraints or single-column FOREIGN
+            KEY constraints, other kinds of constraints such as PRIMARY KEY,
+            UNIQUE or CHECK constraints **cannot** be generated using this
+            method; for these constraints, refer to operations such as
+            :meth:`.Operations.create_primary_key` and
+            :meth:`.Operations.create_check_constraint`. In particular, the
+            following :class:`~sqlalchemy.schema.Column` parameters are
+            **ignored**:
+
+            * :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
+              typically do not support an ALTER operation that can add
+              individual columns one at a time to an existing primary key
+              constraint, therefore it's less ambiguous to use the
+              :meth:`.Operations.create_primary_key` method, which assumes no
+              existing primary key constraint is present.
+            * :paramref:`~sqlalchemy.schema.Column.unique` - use the
+              :meth:`.Operations.create_unique_constraint` method
+            * :paramref:`~sqlalchemy.schema.Column.index` - use the
+              :meth:`.Operations.create_index` method
+
+
+        The provided :class:`~sqlalchemy.schema.Column` object may include a
+        :class:`~sqlalchemy.schema.ForeignKey` constraint directive,
+        referencing a remote table name. For this specific type of constraint,
+        Alembic will automatically emit a second ALTER statement in order to
+        add the single-column FOREIGN KEY constraint separately::
+
+            from alembic import op
+            from sqlalchemy import Column, INTEGER, ForeignKey
+
+            op.add_column(
+                "organization",
+                Column("account_id", INTEGER, ForeignKey("accounts.id")),
+            )
+
+        The column argument passed to :meth:`.Operations.add_column` is a
+        :class:`~sqlalchemy.schema.Column` construct, used in the same way it's
+        used in SQLAlchemy. In particular, values or functions to be indicated
+        as producing the column's default value on the database side are
+        specified using the ``server_default`` parameter, and not ``default``
+        which only specifies Python-side defaults::
+
+            from alembic import op
+            from sqlalchemy import Column, TIMESTAMP, func
+
+            # specify "DEFAULT NOW" along with the column add
+            op.add_column(
+                "account",
+                Column("timestamp", TIMESTAMP, server_default=func.now()),
+            )
+
+        :param table_name: String name of the parent table.
+        :param column: a :class:`sqlalchemy.schema.Column` object
+         representing the new column.
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+
+        """
+
+        op = cls(table_name, column, schema=schema)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_add_column(
+        cls,
+        operations: BatchOperations,
+        column: Column[Any],
+        *,
+        insert_before: Optional[str] = None,
+        insert_after: Optional[str] = None,
+    ) -> None:
+        """Issue an "add column" instruction using the current
+        batch migration context.
+
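+        e.g., a hypothetical sketch placing the new column after an assumed
+        ``id`` column (all names are illustrative)::
+
+            from sqlalchemy import Column, String
+
+            with op.batch_alter_table("organization") as batch_op:
+                batch_op.add_column(
+                    Column("name", String()),
+                    insert_after="id",  # assumes an existing "id" column
+                )
+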
+        .. seealso::
+
+            :meth:`.Operations.add_column`
+
+        """
+
+        kw = {}
+        if insert_before:
+            kw["insert_before"] = insert_before
+        if insert_after:
+            kw["insert_after"] = insert_after
+
+        op = cls(
+            operations.impl.table_name,
+            column,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
+
+@Operations.register_operation("drop_column")
+@BatchOperations.register_operation("drop_column", "batch_drop_column")
+class DropColumnOp(AlterTableOp):
+    """Represent a drop column operation."""
+
+    def __init__(
+        self,
+        table_name: str,
+        column_name: str,
+        *,
+        schema: Optional[str] = None,
+        _reverse: Optional[AddColumnOp] = None,
+        **kw: Any,
+    ) -> None:
+        super().__init__(table_name, schema=schema)
+        self.column_name = column_name
+        self.kw = kw
+        self._reverse = _reverse
+
+    def to_diff_tuple(
+        self,
+    ) -> Tuple[str, Optional[str], str, Column[Any]]:
+        return (
+            "remove_column",
+            self.schema,
+            self.table_name,
+            self.to_column(),
+        )
+
+    def reverse(self) -> AddColumnOp:
+        if self._reverse is None:
+            raise ValueError(
+                "operation is not reversible; "
+                "original column is not present"
+            )
+
+        return AddColumnOp.from_column_and_tablename(
+            self.schema, self.table_name, self._reverse.column
+        )
+
+    @classmethod
+    def from_column_and_tablename(
+        cls,
+        schema: Optional[str],
+        tname: str,
+        col: Column[Any],
+    ) -> DropColumnOp:
+        return cls(
+            tname,
+            col.name,
+            schema=schema,
+            _reverse=AddColumnOp.from_column_and_tablename(schema, tname, col),
+        )
+
+    def to_column(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> Column[Any]:
+        if self._reverse is not None:
+            return self._reverse.column
+        schema_obj = schemaobj.SchemaObjects(migration_context)
+        return schema_obj.column(self.column_name, NULLTYPE)
+
+    @classmethod
+    def drop_column(
+        cls,
+        operations: Operations,
+        table_name: str,
+        column_name: str,
+        *,
+        schema: Optional[str] = None,
+        **kw: Any,
+    ) -> None:
+        """Issue a "drop column" instruction using the current
+        migration context.
+
+        e.g.::
+
+            op.drop_column("organization", "account_id")
+
+        :param table_name: name of table
+        :param column_name: name of column
+        :param schema: Optional schema name to operate within.  To control
+         quoting of the schema outside of the default behavior, use
+         the SQLAlchemy construct
+         :class:`~sqlalchemy.sql.elements.quoted_name`.
+        :param mssql_drop_check: Optional boolean.  When ``True``, on
+         Microsoft SQL Server only, first
+         drop the CHECK constraint on the column using a
+         SQL-script-compatible
+         block that selects into a @variable from sys.check_constraints,
+         then exec's a separate DROP CONSTRAINT for that constraint.
+        :param mssql_drop_default: Optional boolean.  When ``True``, on
+         Microsoft SQL Server only, first
+         drop the DEFAULT constraint on the column using a
+         SQL-script-compatible
+         block that selects into a @variable from sys.default_constraints,
+         then exec's a separate DROP CONSTRAINT for that default.
+        :param mssql_drop_foreign_key: Optional boolean.  When ``True``, on
+         Microsoft SQL Server only, first
+         drop a single FOREIGN KEY constraint on the column using a
+         SQL-script-compatible
+         block that selects into a @variable from
+         sys.foreign_keys/sys.foreign_key_columns,
+         then exec's a separate DROP CONSTRAINT for that foreign key.
+         Currently this only works if the column has exactly one FK
+         constraint which refers to it.
+
+        """
+
+        op = cls(table_name, column_name, schema=schema, **kw)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_drop_column(
+        cls, operations: BatchOperations, column_name: str, **kw: Any
+    ) -> None:
+        """Issue a "drop column" instruction using the current
+        batch migration context.
+
+        .. seealso::
+
+            :meth:`.Operations.drop_column`
+
+        """
+        op = cls(
+            operations.impl.table_name,
+            column_name,
+            schema=operations.impl.schema,
+            **kw,
+        )
+        return operations.invoke(op)
+
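+
+# NOTE: illustrative sketch added by the editor; not upstream alembic code.
+# A DropColumnOp built via ``from_column_and_tablename`` keeps the original
+# Column and is therefore reversible, unlike one built directly:
+def _example_drop_column_reverse() -> None:
+    import sqlalchemy as sa
+
+    col = sa.Column("account_id", sa.Integer())
+    drop = DropColumnOp.from_column_and_tablename(None, "organization", col)
+    add_back = drop.reverse()  # an AddColumnOp that restores "account_id"
+    assert add_back.column is col
+    # A bare DropColumnOp("organization", "account_id") has no captured
+    # column, so its reverse() raises the ValueError shown above.
+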
+
+@Operations.register_operation("bulk_insert")
+class BulkInsertOp(MigrateOperation):
+    """Represent a bulk insert operation."""
+
+    def __init__(
+        self,
+        table: Union[Table, TableClause],
+        rows: List[Dict[str, Any]],
+        *,
+        multiinsert: bool = True,
+    ) -> None:
+        self.table = table
+        self.rows = rows
+        self.multiinsert = multiinsert
+
+    @classmethod
+    def bulk_insert(
+        cls,
+        operations: Operations,
+        table: Union[Table, TableClause],
+        rows: List[Dict[str, Any]],
+        *,
+        multiinsert: bool = True,
+    ) -> None:
+        """Issue a "bulk insert" operation using the current
+        migration context.
+
+        This provides a means of representing an INSERT of multiple rows
+        which works equally well in the context of executing on a live
+        connection as well as that of generating a SQL script.   In the
+        case of a SQL script, the values are rendered inline into the
+        statement.
+
+        e.g.::
+
+            from alembic import op
+            from datetime import date
+            from sqlalchemy.sql import table, column
+            from sqlalchemy import String, Integer, Date
+
+            # Create an ad-hoc table to use for the insert statement.
+            accounts_table = table(
+                "account",
+                column("id", Integer),
+                column("name", String),
+                column("create_date", Date),
+            )
+
+            op.bulk_insert(
+                accounts_table,
+                [
+                    {
+                        "id": 1,
+                        "name": "John Smith",
+                        "create_date": date(2010, 10, 5),
+                    },
+                    {
+                        "id": 2,
+                        "name": "Ed Williams",
+                        "create_date": date(2007, 5, 27),
+                    },
+                    {
+                        "id": 3,
+                        "name": "Wendy Jones",
+                        "create_date": date(2008, 8, 15),
+                    },
+                ],
+            )
+
+        When using --sql mode, some datatypes may not render inline
+        automatically, such as dates and other special types.   When this
+        issue is present, :meth:`.Operations.inline_literal` may be used::
+
+            op.bulk_insert(
+                accounts_table,
+                [
+                    {
+                        "id": 1,
+                        "name": "John Smith",
+                        "create_date": op.inline_literal("2010-10-05"),
+                    },
+                    {
+                        "id": 2,
+                        "name": "Ed Williams",
+                        "create_date": op.inline_literal("2007-05-27"),
+                    },
+                    {
+                        "id": 3,
+                        "name": "Wendy Jones",
+                        "create_date": op.inline_literal("2008-08-15"),
+                    },
+                ],
+                multiinsert=False,
+            )
+
+        When using :meth:`.Operations.inline_literal` in conjunction with
+        :meth:`.Operations.bulk_insert`, in order for the statement to work
+        in "online" (e.g. non --sql) mode, the
+        :paramref:`~.Operations.bulk_insert.multiinsert`
+        flag should be set to ``False``, which will have the effect of
+        individual INSERT statements being emitted to the database, each
+        with a distinct VALUES clause, so that the "inline" values can
+        still be rendered, rather than attempting to pass the values
+        as bound parameters.
+
+        :param table: a table object which represents the target of the INSERT.
+
+        :param rows: a list of dictionaries indicating rows.
+
+        :param multiinsert: when at its default of True and --sql mode is not
+           enabled, the INSERT statement will be executed using
+           "executemany()" style, where all elements in the list of
+           dictionaries are passed as bound parameters in a single
+           list.   Setting this to False results in individual INSERT
+           statements being emitted per parameter set, and is needed
+           in those cases where non-literal values are present in the
+           parameter sets.
+
+        """
+
+        op = cls(table, rows, multiinsert=multiinsert)
+        operations.invoke(op)
+
+
+@Operations.register_operation("execute")
+@BatchOperations.register_operation("execute", "batch_execute")
+class ExecuteSQLOp(MigrateOperation):
+    """Represent an execute SQL operation."""
+
+    def __init__(
+        self,
+        sqltext: Union[Executable, str],
+        *,
+        execution_options: Optional[dict[str, Any]] = None,
+    ) -> None:
+        self.sqltext = sqltext
+        self.execution_options = execution_options
+
+    @classmethod
+    def execute(
+        cls,
+        operations: Operations,
+        sqltext: Union[Executable, str],
+        *,
+        execution_options: Optional[dict[str, Any]] = None,
+    ) -> None:
+        r"""Execute the given SQL using the current migration context.
+
+        The given SQL can be a plain string, e.g.::
+
+            op.execute("INSERT INTO table (foo) VALUES ('some value')")
+
+        Or it can be any kind of Core SQL Expression construct, such as
+        below where we use an update construct::
+
+            from sqlalchemy.sql import table, column
+            from sqlalchemy import String
+            from alembic import op
+
+            account = table("account", column("name", String))
+            op.execute(
+                account.update()
+                .where(account.c.name == op.inline_literal("account 1"))
+                .values({"name": op.inline_literal("account 2")})
+            )
+
+        Above, we made use of the SQLAlchemy
+        :func:`sqlalchemy.sql.expression.table` and
+        :func:`sqlalchemy.sql.expression.column` constructs to make a brief,
+        ad-hoc table construct just for our UPDATE statement.  A full
+        :class:`~sqlalchemy.schema.Table` construct of course works perfectly
+        fine as well, though note it's a recommended practice to at least
+        ensure the definition of a table is self-contained within the migration
+        script, rather than imported from a module that may break compatibility
+        with older migrations.
+
+        In a SQL script context, the statement is emitted directly to the
+        output stream.   There is *no* return result, however, as this
+        function is oriented towards generating a change script
+        that can run in "offline" mode.     Additionally, parameterized
+        statements are discouraged here, as they *will not work* in offline
+        mode.  Above, we use :meth:`.inline_literal` where parameters are
+        to be used.
+
+        For full interaction with a connected database where parameters can
+        also be used normally, use the "bind" available from the context::
+
+            from alembic import op
+
+            connection = op.get_bind()
+
+            connection.execute(
+                account.update()
+                .where(account.c.name == "account 1")
+                .values({"name": "account 2"})
+            )
+
+        Additionally, when passing the statement as a plain string, it is first
+        coerced into a :func:`sqlalchemy.sql.expression.text` construct
+        before being passed along.  In the less likely case that the
+        literal SQL string contains a colon, it must be escaped with a
+        backslash, as::
+
+           op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
+
+
+        :param sqltext: Any legal SQLAlchemy expression, including:
+
+        * a string
+        * a :func:`sqlalchemy.sql.expression.text` construct.
+        * a :func:`sqlalchemy.sql.expression.insert` construct.
+        * a :func:`sqlalchemy.sql.expression.update` construct.
+        * a :func:`sqlalchemy.sql.expression.delete` construct.
+        * Any "executable" described in SQLAlchemy Core documentation,
+          noting that no result set is returned.
+
+        .. note::  when passing a plain string, the statement is coerced into
+           a :func:`sqlalchemy.sql.expression.text` construct. This construct
+           considers symbols with colons, e.g. ``:foo`` to be bound parameters.
+           To avoid this, ensure that colon symbols are escaped, e.g.
+           ``\:foo``.
+
+        :param execution_options: Optional dictionary of
+         execution options, will be passed to
+         :meth:`sqlalchemy.engine.Connection.execution_options`.
+        """
+        op = cls(sqltext, execution_options=execution_options)
+        return operations.invoke(op)
+
+    @classmethod
+    def batch_execute(
+        cls,
+        operations: Operations,
+        sqltext: Union[Executable, str],
+        *,
+        execution_options: Optional[dict[str, Any]] = None,
+    ) -> None:
+        """Execute the given SQL using the current migration context.
+
+        .. seealso::
+
+            :meth:`.Operations.execute`
+
+        """
+        return cls.execute(
+            operations, sqltext, execution_options=execution_options
+        )
+
+    def to_diff_tuple(self) -> Tuple[str, Union[Executable, str]]:
+        return ("execute", self.sqltext)
+
+
+class OpContainer(MigrateOperation):
+    """Represent a sequence of operations operation."""
+
+    def __init__(self, ops: Sequence[MigrateOperation] = ()) -> None:
+        self.ops = list(ops)
+
+    def is_empty(self) -> bool:
+        return not self.ops
+
+    def as_diffs(self) -> Any:
+        return list(OpContainer._ops_as_diffs(self))
+
+    @classmethod
+    def _ops_as_diffs(
+        cls, migrations: OpContainer
+    ) -> Iterator[Tuple[Any, ...]]:
+        for op in migrations.ops:
+            if hasattr(op, "ops"):
+                yield from cls._ops_as_diffs(cast("OpContainer", op))
+            else:
+                yield op.to_diff_tuple()
+
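+
+# NOTE: illustrative sketch added by the editor; not upstream alembic code.
+# ``as_diffs()`` flattens nested containers into the plain diff tuples used
+# by autogenerate, e.g. ("remove_column", schema, table, Column):
+def _example_as_diffs() -> None:
+    import sqlalchemy as sa
+
+    col = sa.Column("account_id", sa.Integer())
+    drop = DropColumnOp.from_column_and_tablename(None, "organization", col)
+    nested = OpContainer([OpContainer([drop])])
+    diffs = nested.as_diffs()
+    # -> [("remove_column", None, "organization", col)]
+    assert diffs[0][0] == "remove_column" and diffs[0][3] is col
+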
+
+class ModifyTableOps(OpContainer):
+    """Contains a sequence of operations that all apply to a single Table."""
+
+    def __init__(
+        self,
+        table_name: str,
+        ops: Sequence[MigrateOperation],
+        *,
+        schema: Optional[str] = None,
+    ) -> None:
+        super().__init__(ops)
+        self.table_name = table_name
+        self.schema = schema
+
+    def reverse(self) -> ModifyTableOps:
+        return ModifyTableOps(
+            self.table_name,
+            ops=list(reversed([op.reverse() for op in self.ops])),
+            schema=self.schema,
+        )
+
+
+class UpgradeOps(OpContainer):
+    """contains a sequence of operations that would apply to the
+    'upgrade' stream of a script.
+
+    .. seealso::
+
+        :ref:`customizing_revision`
+
+    """
+
+    def __init__(
+        self,
+        ops: Sequence[MigrateOperation] = (),
+        upgrade_token: str = "upgrades",
+    ) -> None:
+        super().__init__(ops=ops)
+        self.upgrade_token = upgrade_token
+
+    def reverse_into(self, downgrade_ops: DowngradeOps) -> DowngradeOps:
+        downgrade_ops.ops[:] = list(
+            reversed([op.reverse() for op in self.ops])
+        )
+        return downgrade_ops
+
+    def reverse(self) -> DowngradeOps:
+        return self.reverse_into(DowngradeOps(ops=[]))
+
+
+class DowngradeOps(OpContainer):
+    """contains a sequence of operations that would apply to the
+    'downgrade' stream of a script.
+
+    .. seealso::
+
+        :ref:`customizing_revision`
+
+    """
+
+    def __init__(
+        self,
+        ops: Sequence[MigrateOperation] = (),
+        downgrade_token: str = "downgrades",
+    ) -> None:
+        super().__init__(ops=ops)
+        self.downgrade_token = downgrade_token
+
+    def reverse(self) -> UpgradeOps:
+        return UpgradeOps(
+            ops=list(reversed([op.reverse() for op in self.ops]))
+        )
+
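+
+# NOTE: illustrative sketch added by the editor; not upstream alembic code.
+# Reversal flips order as well as direction: an upgrade of [add a, add b]
+# becomes a downgrade of [drop b, drop a]:
+def _example_reverse_upgrade_ops() -> None:
+    import sqlalchemy as sa
+
+    add_a = AddColumnOp.from_column_and_tablename(
+        None, "account", sa.Column("a", sa.Integer())
+    )
+    add_b = AddColumnOp.from_column_and_tablename(
+        None, "account", sa.Column("b", sa.Integer())
+    )
+    down = UpgradeOps(ops=[add_a, add_b]).reverse()
+    assert isinstance(down, DowngradeOps)
+    assert [op_.column_name for op_ in down.ops] == ["b", "a"]
+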
+
+class MigrationScript(MigrateOperation):
+    """represents a migration script.
+
+    E.g. when autogenerate encounters this object, this corresponds to the
+    production of an actual script file.
+
+    A normal :class:`.MigrationScript` object would contain a single
+    :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive.
+    These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops``
+    attributes.
+
+    In the case of an autogenerate operation that runs multiple times,
+    such as the multiple database example in the "multidb" template,
+    the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled,
+    and instead these objects should be accessed via the ``.upgrade_ops_list``
+    and ``.downgrade_ops_list`` list-based attributes.  These latter
+    attributes are always available at the very least as single-element lists.
+
+    .. seealso::
+
+        :ref:`customizing_revision`
+
+    """
+
+    _needs_render: Optional[bool]
+    _upgrade_ops: List[UpgradeOps]
+    _downgrade_ops: List[DowngradeOps]
+
+    def __init__(
+        self,
+        rev_id: Optional[str],
+        upgrade_ops: UpgradeOps,
+        downgrade_ops: DowngradeOps,
+        *,
+        message: Optional[str] = None,
+        imports: Set[str] = set(),
+        head: Optional[str] = None,
+        splice: Optional[bool] = None,
+        branch_label: Optional[_RevIdType] = None,
+        version_path: Optional[str] = None,
+        depends_on: Optional[_RevIdType] = None,
+    ) -> None:
+        self.rev_id = rev_id
+        self.message = message
+        self.imports = imports
+        self.head = head
+        self.splice = splice
+        self.branch_label = branch_label
+        self.version_path = version_path
+        self.depends_on = depends_on
+        self.upgrade_ops = upgrade_ops
+        self.downgrade_ops = downgrade_ops
+
+    @property
+    def upgrade_ops(self) -> Optional[UpgradeOps]:
+        """An instance of :class:`.UpgradeOps`.
+
+        .. seealso::
+
+            :attr:`.MigrationScript.upgrade_ops_list`
+        """
+        if len(self._upgrade_ops) > 1:
+            raise ValueError(
+                "This MigrationScript instance has a multiple-entry "
+                "list for UpgradeOps; please use the "
+                "upgrade_ops_list attribute."
+            )
+        elif not self._upgrade_ops:
+            return None
+        else:
+            return self._upgrade_ops[0]
+
+    @upgrade_ops.setter
+    def upgrade_ops(
+        self, upgrade_ops: Union[UpgradeOps, List[UpgradeOps]]
+    ) -> None:
+        self._upgrade_ops = util.to_list(upgrade_ops)
+        for elem in self._upgrade_ops:
+            assert isinstance(elem, UpgradeOps)
+
+    @property
+    def downgrade_ops(self) -> Optional[DowngradeOps]:
+        """An instance of :class:`.DowngradeOps`.
+
+        .. seealso::
+
+            :attr:`.MigrationScript.downgrade_ops_list`
+        """
+        if len(self._downgrade_ops) > 1:
+            raise ValueError(
+                "This MigrationScript instance has a multiple-entry "
+                "list for DowngradeOps; please use the "
+                "downgrade_ops_list attribute."
+            )
+        elif not self._downgrade_ops:
+            return None
+        else:
+            return self._downgrade_ops[0]
+
+    @downgrade_ops.setter
+    def downgrade_ops(
+        self, downgrade_ops: Union[DowngradeOps, List[DowngradeOps]]
+    ) -> None:
+        self._downgrade_ops = util.to_list(downgrade_ops)
+        for elem in self._downgrade_ops:
+            assert isinstance(elem, DowngradeOps)
+
+    @property
+    def upgrade_ops_list(self) -> List[UpgradeOps]:
+        """A list of :class:`.UpgradeOps` instances.
+
+        This is used in place of the :attr:`.MigrationScript.upgrade_ops`
+        attribute when dealing with a revision operation that does
+        multiple autogenerate passes.
+
+        """
+        return self._upgrade_ops
+
+    @property
+    def downgrade_ops_list(self) -> List[DowngradeOps]:
+        """A list of :class:`.DowngradeOps` instances.
+
+        This is used in place of the :attr:`.MigrationScript.downgrade_ops`
+        attribute when dealing with a revision operation that does
+        multiple autogenerate passes.
+
+        """
+        return self._downgrade_ops
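+
+
+# NOTE: illustrative sketch added by the editor; not upstream alembic code.
+# A typical ``process_revision_directives`` hook (see
+# :ref:`customizing_revision`) receives a single MigrationScript and can,
+# for example, suppress empty autogenerate revisions:
+def _example_process_revision_directives(context, revision, directives):
+    script = directives[0]  # a MigrationScript instance
+    upgrade_ops = script.upgrade_ops
+    if upgrade_ops is not None and upgrade_ops.is_empty():
+        directives[:] = []  # tell autogenerate not to emit a file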
diff --git a/venv/Lib/site-packages/alembic/operations/schemaobj.py b/venv/Lib/site-packages/alembic/operations/schemaobj.py
new file mode 100644
index 0000000000000000000000000000000000000000..32b26e9b9d6471c7c663e732a2cfeb35e9eb4bd6
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/operations/schemaobj.py
@@ -0,0 +1,288 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from typing import Any
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import schema as sa_schema
+from sqlalchemy.sql.schema import Column
+from sqlalchemy.sql.schema import Constraint
+from sqlalchemy.sql.schema import Index
+from sqlalchemy.types import Integer
+from sqlalchemy.types import NULLTYPE
+
+from .. import util
+from ..util import sqla_compat
+
+if TYPE_CHECKING:
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.elements import TextClause
+    from sqlalchemy.sql.schema import CheckConstraint
+    from sqlalchemy.sql.schema import ForeignKey
+    from sqlalchemy.sql.schema import ForeignKeyConstraint
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import PrimaryKeyConstraint
+    from sqlalchemy.sql.schema import Table
+    from sqlalchemy.sql.schema import UniqueConstraint
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from ..runtime.migration import MigrationContext
+
+
+class SchemaObjects:
+    def __init__(
+        self, migration_context: Optional[MigrationContext] = None
+    ) -> None:
+        self.migration_context = migration_context
+
+    def primary_key_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        cols: Sequence[str],
+        schema: Optional[str] = None,
+        **dialect_kw,
+    ) -> PrimaryKeyConstraint:
+        m = self.metadata()
+        columns = [sa_schema.Column(n, NULLTYPE) for n in cols]
+        t = sa_schema.Table(table_name, m, *columns, schema=schema)
+        # SQLAlchemy primary key constraint name arg is wrongly typed on
+        # the SQLAlchemy side through 2.0.5 at least
+        p = sa_schema.PrimaryKeyConstraint(
+            *[t.c[n] for n in cols], name=name, **dialect_kw  # type: ignore
+        )
+        return p
+
+    def foreign_key_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        source: str,
+        referent: str,
+        local_cols: List[str],
+        remote_cols: List[str],
+        onupdate: Optional[str] = None,
+        ondelete: Optional[str] = None,
+        deferrable: Optional[bool] = None,
+        source_schema: Optional[str] = None,
+        referent_schema: Optional[str] = None,
+        initially: Optional[str] = None,
+        match: Optional[str] = None,
+        **dialect_kw,
+    ) -> ForeignKeyConstraint:
+        m = self.metadata()
+        if source == referent and source_schema == referent_schema:
+            t1_cols = local_cols + remote_cols
+        else:
+            t1_cols = local_cols
+            sa_schema.Table(
+                referent,
+                m,
+                *[sa_schema.Column(n, NULLTYPE) for n in remote_cols],
+                schema=referent_schema,
+            )
+
+        t1 = sa_schema.Table(
+            source,
+            m,
+            *[
+                sa_schema.Column(n, NULLTYPE)
+                for n in util.unique_list(t1_cols)
+            ],
+            schema=source_schema,
+        )
+
+        tname = (
+            "%s.%s" % (referent_schema, referent)
+            if referent_schema
+            else referent
+        )
+
+        dialect_kw["match"] = match
+
+        f = sa_schema.ForeignKeyConstraint(
+            local_cols,
+            ["%s.%s" % (tname, n) for n in remote_cols],
+            name=name,
+            onupdate=onupdate,
+            ondelete=ondelete,
+            deferrable=deferrable,
+            initially=initially,
+            **dialect_kw,
+        )
+        t1.append_constraint(f)
+
+        return f
+
+    def unique_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        source: str,
+        local_cols: Sequence[str],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> UniqueConstraint:
+        t = sa_schema.Table(
+            source,
+            self.metadata(),
+            *[sa_schema.Column(n, NULLTYPE) for n in local_cols],
+            schema=schema,
+        )
+        kw["name"] = name
+        uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw)
+        # TODO: need event tests to ensure the event
+        # is fired off here
+        t.append_constraint(uq)
+        return uq
+
+    def check_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        source: str,
+        condition: Union[str, TextClause, ColumnElement[Any]],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> CheckConstraint:
+        t = sa_schema.Table(
+            source,
+            self.metadata(),
+            sa_schema.Column("x", Integer),
+            schema=schema,
+        )
+        ck = sa_schema.CheckConstraint(condition, name=name, **kw)
+        t.append_constraint(ck)
+        return ck
+
+    def generic_constraint(
+        self,
+        name: Optional[sqla_compat._ConstraintNameDefined],
+        table_name: str,
+        type_: Optional[str],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> Any:
+        t = self.table(table_name, schema=schema)
+        types: Dict[Optional[str], Any] = {
+            "foreignkey": lambda name: sa_schema.ForeignKeyConstraint(
+                [], [], name=name
+            ),
+            "primary": sa_schema.PrimaryKeyConstraint,
+            "unique": sa_schema.UniqueConstraint,
+            "check": lambda name: sa_schema.CheckConstraint("", name=name),
+            None: sa_schema.Constraint,
+        }
+        try:
+            const = types[type_]
+        except KeyError as ke:
+            raise TypeError(
+                "'type' can be one of %s"
+                % ", ".join(sorted(repr(x) for x in types))
+            ) from ke
+        else:
+            const = const(name=name)
+            t.append_constraint(const)
+            return const
+
+    def metadata(self) -> MetaData:
+        kw = {}
+        if (
+            self.migration_context is not None
+            and "target_metadata" in self.migration_context.opts
+        ):
+            mt = self.migration_context.opts["target_metadata"]
+            if hasattr(mt, "naming_convention"):
+                kw["naming_convention"] = mt.naming_convention
+        return sa_schema.MetaData(**kw)
+
+    def table(self, name: str, *columns, **kw) -> Table:
+        m = self.metadata()
+
+        cols = [
+            sqla_compat._copy(c) if c.table is not None else c
+            for c in columns
+            if isinstance(c, Column)
+        ]
+        # these flags have already added their UniqueConstraint /
+        # Index objects to the table, so flip them off here.
+        # SQLAlchemy tometadata() avoids this instead by preserving the
+        # flags and skipping the constraints that have _type_bound on them,
+        # but for a migration we'd rather list out the constraints
+        # explicitly.
+        _constraints_included = kw.pop("_constraints_included", False)
+        if _constraints_included:
+            for c in cols:
+                c.unique = c.index = False
+
+        t = sa_schema.Table(name, m, *cols, **kw)
+
+        constraints = [
+            sqla_compat._copy(elem, target_table=t)
+            if getattr(elem, "parent", None) is not t
+            and getattr(elem, "parent", None) is not None
+            else elem
+            for elem in columns
+            if isinstance(elem, (Constraint, Index))
+        ]
+
+        for const in constraints:
+            t.append_constraint(const)
+
+        for f in t.foreign_keys:
+            self._ensure_table_for_fk(m, f)
+        return t
+
+    def column(self, name: str, type_: TypeEngine, **kw) -> Column:
+        return sa_schema.Column(name, type_, **kw)
+
+    def index(
+        self,
+        name: Optional[str],
+        tablename: Optional[str],
+        columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
+        schema: Optional[str] = None,
+        **kw,
+    ) -> Index:
+        t = sa_schema.Table(
+            tablename or "no_table",
+            self.metadata(),
+            schema=schema,
+        )
+        kw["_table"] = t
+        idx = sa_schema.Index(
+            name,
+            *[util.sqla_compat._textual_index_column(t, n) for n in columns],
+            **kw,
+        )
+        return idx
+
+    def _parse_table_key(self, table_key: str) -> Tuple[Optional[str], str]:
+        if "." in table_key:
+            tokens = table_key.split(".")
+            sname: Optional[str] = ".".join(tokens[0:-1])
+            tname = tokens[-1]
+        else:
+            tname = table_key
+            sname = None
+        return (sname, tname)
+
+    def _ensure_table_for_fk(self, metadata: MetaData, fk: ForeignKey) -> None:
+        """create a placeholder Table object for the referent of a
+        ForeignKey.
+
+        """
+        if isinstance(fk._colspec, str):
+            table_key, cname = fk._colspec.rsplit(".", 1)
+            sname, tname = self._parse_table_key(table_key)
+            if table_key not in metadata.tables:
+                rel_t = sa_schema.Table(tname, metadata, schema=sname)
+            else:
+                rel_t = metadata.tables[table_key]
+            if cname not in rel_t.c:
+                rel_t.append_column(sa_schema.Column(cname, NULLTYPE))
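+
+
+# NOTE: illustrative sketch added by the editor; not upstream alembic code.
+# SchemaObjects is the internal factory the op classes use to turn plain
+# strings into throwaway SQLAlchemy constructs; simple cases need no
+# MigrationContext:
+def _example_schema_objects() -> None:
+    so = SchemaObjects(migration_context=None)
+    uq = so.unique_constraint("uq_account_name", "account", ["name"])
+    assert uq.name == "uq_account_name"
+    # schema-qualified table keys split on the last dot:
+    assert so._parse_table_key("reporting.account") == ("reporting", "account")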
diff --git a/venv/Lib/site-packages/alembic/operations/toimpl.py b/venv/Lib/site-packages/alembic/operations/toimpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..4759f7fd2aa7d118ba0e811d5cb207e8b28d173a
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/operations/toimpl.py
@@ -0,0 +1,226 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from typing import TYPE_CHECKING
+
+from sqlalchemy import schema as sa_schema
+
+from . import ops
+from .base import Operations
+from ..util.sqla_compat import _copy
+from ..util.sqla_compat import sqla_14
+
+if TYPE_CHECKING:
+    from sqlalchemy.sql.schema import Table
+
+
+@Operations.implementation_for(ops.AlterColumnOp)
+def alter_column(
+    operations: "Operations", operation: "ops.AlterColumnOp"
+) -> None:
+    compiler = operations.impl.dialect.statement_compiler(
+        operations.impl.dialect, None
+    )
+
+    existing_type = operation.existing_type
+    existing_nullable = operation.existing_nullable
+    existing_server_default = operation.existing_server_default
+    type_ = operation.modify_type
+    column_name = operation.column_name
+    table_name = operation.table_name
+    schema = operation.schema
+    server_default = operation.modify_server_default
+    new_column_name = operation.modify_name
+    nullable = operation.modify_nullable
+    comment = operation.modify_comment
+    existing_comment = operation.existing_comment
+
+    def _count_constraint(constraint):
+        return not isinstance(constraint, sa_schema.PrimaryKeyConstraint) and (
+            not constraint._create_rule or constraint._create_rule(compiler)
+        )
+
+    if existing_type and type_:
+        t = operations.schema_obj.table(
+            table_name,
+            sa_schema.Column(column_name, existing_type),
+            schema=schema,
+        )
+        for constraint in t.constraints:
+            if _count_constraint(constraint):
+                operations.impl.drop_constraint(constraint)
+
+    operations.impl.alter_column(
+        table_name,
+        column_name,
+        nullable=nullable,
+        server_default=server_default,
+        name=new_column_name,
+        type_=type_,
+        schema=schema,
+        existing_type=existing_type,
+        existing_server_default=existing_server_default,
+        existing_nullable=existing_nullable,
+        comment=comment,
+        existing_comment=existing_comment,
+        **operation.kw,
+    )
+
+    if type_:
+        t = operations.schema_obj.table(
+            table_name,
+            operations.schema_obj.column(column_name, type_),
+            schema=schema,
+        )
+        for constraint in t.constraints:
+            if _count_constraint(constraint):
+                operations.impl.add_constraint(constraint)
+
+
+@Operations.implementation_for(ops.DropTableOp)
+def drop_table(operations: "Operations", operation: "ops.DropTableOp") -> None:
+    operations.impl.drop_table(
+        operation.to_table(operations.migration_context)
+    )
+
+
+@Operations.implementation_for(ops.DropColumnOp)
+def drop_column(
+    operations: "Operations", operation: "ops.DropColumnOp"
+) -> None:
+    column = operation.to_column(operations.migration_context)
+    operations.impl.drop_column(
+        operation.table_name, column, schema=operation.schema, **operation.kw
+    )
+
+
+@Operations.implementation_for(ops.CreateIndexOp)
+def create_index(
+    operations: "Operations", operation: "ops.CreateIndexOp"
+) -> None:
+    idx = operation.to_index(operations.migration_context)
+    kw = {}
+    if operation.if_not_exists is not None:
+        if not sqla_14:
+            raise NotImplementedError("SQLAlchemy 1.4+ required")
+
+        kw["if_not_exists"] = operation.if_not_exists
+    operations.impl.create_index(idx, **kw)
+
+
+@Operations.implementation_for(ops.DropIndexOp)
+def drop_index(operations: "Operations", operation: "ops.DropIndexOp") -> None:
+    kw = {}
+    if operation.if_exists is not None:
+        if not sqla_14:
+            raise NotImplementedError("SQLAlchemy 1.4+ required")
+
+        kw["if_exists"] = operation.if_exists
+
+    operations.impl.drop_index(
+        operation.to_index(operations.migration_context),
+        **kw,
+    )
+
+
+@Operations.implementation_for(ops.CreateTableOp)
+def create_table(
+    operations: "Operations", operation: "ops.CreateTableOp"
+) -> "Table":
+    table = operation.to_table(operations.migration_context)
+    operations.impl.create_table(table)
+    return table
+
+
+@Operations.implementation_for(ops.RenameTableOp)
+def rename_table(
+    operations: "Operations", operation: "ops.RenameTableOp"
+) -> None:
+    operations.impl.rename_table(
+        operation.table_name, operation.new_table_name, schema=operation.schema
+    )
+
+
+@Operations.implementation_for(ops.CreateTableCommentOp)
+def create_table_comment(
+    operations: "Operations", operation: "ops.CreateTableCommentOp"
+) -> None:
+    table = operation.to_table(operations.migration_context)
+    operations.impl.create_table_comment(table)
+
+
+@Operations.implementation_for(ops.DropTableCommentOp)
+def drop_table_comment(
+    operations: "Operations", operation: "ops.DropTableCommentOp"
+) -> None:
+    table = operation.to_table(operations.migration_context)
+    operations.impl.drop_table_comment(table)
+
+
+@Operations.implementation_for(ops.AddColumnOp)
+def add_column(operations: "Operations", operation: "ops.AddColumnOp") -> None:
+    table_name = operation.table_name
+    column = operation.column
+    schema = operation.schema
+    kw = operation.kw
+
+    if column.table is not None:
+        column = _copy(column)
+
+    t = operations.schema_obj.table(table_name, column, schema=schema)
+    operations.impl.add_column(table_name, column, schema=schema, **kw)
+
+    for constraint in t.constraints:
+        if not isinstance(constraint, sa_schema.PrimaryKeyConstraint):
+            operations.impl.add_constraint(constraint)
+    for index in t.indexes:
+        operations.impl.create_index(index)
+
+    with_comment = (
+        operations.impl.dialect.supports_comments
+        and not operations.impl.dialect.inline_comments
+    )
+    comment = column.comment
+    if comment and with_comment:
+        operations.impl.create_column_comment(column)
+
+
+@Operations.implementation_for(ops.AddConstraintOp)
+def create_constraint(
+    operations: "Operations", operation: "ops.AddConstraintOp"
+) -> None:
+    operations.impl.add_constraint(
+        operation.to_constraint(operations.migration_context)
+    )
+
+
+@Operations.implementation_for(ops.DropConstraintOp)
+def drop_constraint(
+    operations: "Operations", operation: "ops.DropConstraintOp"
+) -> None:
+    operations.impl.drop_constraint(
+        operations.schema_obj.generic_constraint(
+            operation.constraint_name,
+            operation.table_name,
+            operation.constraint_type,
+            schema=operation.schema,
+        )
+    )
+
+
+@Operations.implementation_for(ops.BulkInsertOp)
+def bulk_insert(
+    operations: "Operations", operation: "ops.BulkInsertOp"
+) -> None:
+    operations.impl.bulk_insert(  # type: ignore[union-attr]
+        operation.table, operation.rows, multiinsert=operation.multiinsert
+    )
+
+
+@Operations.implementation_for(ops.ExecuteSQLOp)
+def execute_sql(
+    operations: "Operations", operation: "ops.ExecuteSQLOp"
+) -> None:
+    operations.migration_context.impl.execute(
+        operation.sqltext, execution_options=operation.execution_options
+    )
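+
+
+# NOTE: illustrative sketch added by the editor; not upstream alembic code.
+# Each function above follows the same pattern: register a callable for an
+# op class, receive (operations, operation), and delegate to operations.impl.
+# A user-defined operation hooks in the same way (``NoOp`` is hypothetical):
+def _example_custom_implementation() -> None:
+    from alembic.operations.ops import MigrateOperation
+
+    class NoOp(MigrateOperation):
+        """A hypothetical operation with no effect."""
+
+    @Operations.implementation_for(NoOp)
+    def _run_noop(operations, operation):
+        # a real implementation would call into operations.impl here
+        pass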
diff --git a/venv/Lib/site-packages/alembic/py.typed b/venv/Lib/site-packages/alembic/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/Lib/site-packages/alembic/runtime/__init__.py b/venv/Lib/site-packages/alembic/runtime/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/Lib/site-packages/alembic/runtime/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/runtime/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..849b0914c28fdb131fd0f016d5f479aef2cfd98b
Binary files /dev/null and b/venv/Lib/site-packages/alembic/runtime/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/runtime/__pycache__/environment.cpython-311.pyc b/venv/Lib/site-packages/alembic/runtime/__pycache__/environment.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dcff91ffc64cf0bcba2aa36512394ead901778de
Binary files /dev/null and b/venv/Lib/site-packages/alembic/runtime/__pycache__/environment.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/runtime/__pycache__/migration.cpython-311.pyc b/venv/Lib/site-packages/alembic/runtime/__pycache__/migration.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23857d0d49a0d403b7142b11ab270a3834b48693
Binary files /dev/null and b/venv/Lib/site-packages/alembic/runtime/__pycache__/migration.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/runtime/environment.py b/venv/Lib/site-packages/alembic/runtime/environment.py
new file mode 100644
index 0000000000000000000000000000000000000000..d64b2adc279761b40724b2c7c7c7f53da1e77019
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/runtime/environment.py
@@ -0,0 +1,1053 @@
+from __future__ import annotations
+
+from typing import Any
+from typing import Callable
+from typing import Collection
+from typing import ContextManager
+from typing import Dict
+from typing import List
+from typing import Mapping
+from typing import MutableMapping
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import TextIO
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy.sql.schema import Column
+from sqlalchemy.sql.schema import FetchedValue
+from typing_extensions import Literal
+
+from .migration import _ProxyTransaction
+from .migration import MigrationContext
+from .. import util
+from ..operations import Operations
+from ..script.revision import _GetRevArg
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine import URL
+    from sqlalchemy.engine.base import Connection
+    from sqlalchemy.sql import Executable
+    from sqlalchemy.sql.schema import MetaData
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.type_api import TypeEngine
+
+    from .migration import MigrationInfo
+    from ..autogenerate.api import AutogenContext
+    from ..config import Config
+    from ..ddl import DefaultImpl
+    from ..operations.ops import MigrationScript
+    from ..script.base import ScriptDirectory
+
+_RevNumber = Optional[Union[str, Tuple[str, ...]]]
+
+ProcessRevisionDirectiveFn = Callable[
+    [MigrationContext, _GetRevArg, List["MigrationScript"]], None
+]
+
+RenderItemFn = Callable[
+    [str, Any, "AutogenContext"], Union[str, Literal[False]]
+]
+
+NameFilterType = Literal[
+    "schema",
+    "table",
+    "column",
+    "index",
+    "unique_constraint",
+    "foreign_key_constraint",
+]
+NameFilterParentNames = MutableMapping[
+    Literal["schema_name", "table_name", "schema_qualified_table_name"],
+    Optional[str],
+]
+IncludeNameFn = Callable[
+    [Optional[str], NameFilterType, NameFilterParentNames], bool
+]
+
+IncludeObjectFn = Callable[
+    [
+        "SchemaItem",
+        Optional[str],
+        NameFilterType,
+        bool,
+        Optional["SchemaItem"],
+    ],
+    bool,
+]
+
+OnVersionApplyFn = Callable[
+    [MigrationContext, "MigrationInfo", Collection[Any], Mapping[str, Any]],
+    None,
+]
+
+CompareServerDefault = Callable[
+    [
+        MigrationContext,
+        "Column[Any]",
+        "Column[Any]",
+        Optional[str],
+        Optional[FetchedValue],
+        Optional[str],
+    ],
+    Optional[bool],
+]
+
+CompareType = Callable[
+    [
+        MigrationContext,
+        "Column[Any]",
+        "Column[Any]",
+        "TypeEngine[Any]",
+        "TypeEngine[Any]",
+    ],
+    Optional[bool],
+]
+
+
+class EnvironmentContext(util.ModuleClsProxy):
+
+    """A configurational facade made available in an ``env.py`` script.
+
+    The :class:`.EnvironmentContext` acts as a *facade* to the more
+    nuts-and-bolts objects of :class:`.MigrationContext` as well as certain
+    aspects of :class:`.Config`,
+    within the context of the ``env.py`` script that is invoked by
+    most Alembic commands.
+
+    :class:`.EnvironmentContext` is normally instantiated
+    when a command in :mod:`alembic.command` is run.  It then makes
+    itself available in the ``alembic.context`` module for the scope
+    of the command.   From within an ``env.py`` script, the current
+    :class:`.EnvironmentContext` is available by importing this module.
+
+    :class:`.EnvironmentContext` also supports programmatic usage.
+    At this level, it acts as a Python context manager, that is, it is
+    intended to be used with the
+    ``with:`` statement.  A typical use of :class:`.EnvironmentContext`::
+
+        from alembic.config import Config
+        from alembic.script import ScriptDirectory
+
+        config = Config()
+        config.set_main_option("script_location", "myapp:migrations")
+        script = ScriptDirectory.from_config(config)
+
+
+        def my_function(rev, context):
+            '''do something with revision "rev", which
+            will be the current database revision,
+            and "context", which is the MigrationContext
+            that the env.py will create'''
+
+
+        with EnvironmentContext(
+            config,
+            script,
+            fn=my_function,
+            as_sql=False,
+            starting_rev="base",
+            destination_rev="head",
+            tag="sometag",
+        ):
+            script.run_env()
+
+    The above script will invoke the ``env.py`` script
+    within the migration environment.  If and when ``env.py``
+    calls :meth:`.MigrationContext.run_migrations`, the
+    ``my_function()`` function above will be called
+    by the :class:`.MigrationContext`, given the context
+    itself as well as the current revision in the database.
+
+    .. note::
+
+        For most API usages other than full blown
+        invocation of migration scripts, the :class:`.MigrationContext`
+        and :class:`.ScriptDirectory` objects can be created and
+        used directly.  The :class:`.EnvironmentContext` object
+        is *only* needed when you need to actually invoke the
+        ``env.py`` module present in the migration environment.
+
+    """
+
+    _migration_context: Optional[MigrationContext] = None
+
+    config: Config = None  # type:ignore[assignment]
+    """An instance of :class:`.Config` representing the
+    configuration file contents as well as other variables
+    set programmatically within it."""
+
+    script: ScriptDirectory = None  # type:ignore[assignment]
+    """An instance of :class:`.ScriptDirectory` which provides
+    programmatic access to version files within the ``versions/``
+    directory.
+
+    """
+
+    def __init__(
+        self, config: Config, script: ScriptDirectory, **kw: Any
+    ) -> None:
+        r"""Construct a new :class:`.EnvironmentContext`.
+
+        :param config: a :class:`.Config` instance.
+        :param script: a :class:`.ScriptDirectory` instance.
+        :param \**kw: keyword options that will be ultimately
+         passed along to the :class:`.MigrationContext` when
+         :meth:`.EnvironmentContext.configure` is called.
+
+        """
+        self.config = config
+        self.script = script
+        self.context_opts = kw
+
+    def __enter__(self) -> EnvironmentContext:
+        """Establish a context which provides a
+        :class:`.EnvironmentContext` object to
+        env.py scripts.
+
+        The :class:`.EnvironmentContext` will
+        be made available as ``from alembic import context``.
+
+        """
+        self._install_proxy()
+        return self
+
+    def __exit__(self, *arg: Any, **kw: Any) -> None:
+        self._remove_proxy()
+
+    def is_offline_mode(self) -> bool:
+        """Return True if the current migrations environment
+        is running in "offline mode".
+
+        This is ``True`` or ``False`` depending
+        on the ``--sql`` flag passed.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        """
+        return self.context_opts.get("as_sql", False)  # type: ignore[no-any-return]  # noqa: E501
+
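+    # NOTE: editor's illustration, not upstream code.  This flag drives the
+    # standard ``env.py`` dispatch, e.g.::
+    #
+    #     from alembic import context
+    #
+    #     if context.is_offline_mode():
+    #         run_migrations_offline()  # emit SQL to a script
+    #     else:
+    #         run_migrations_online()   # execute on a live Connection
+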
+    def is_transactional_ddl(self) -> bool:
+        """Return True if the context is configured to expect a
+        transactional DDL capable backend.
+
+        By default this depends on the type of database in use, and
+        can be overridden by the ``transactional_ddl`` argument
+        to :meth:`.configure`.
+
+        This function requires that a :class:`.MigrationContext`
+        has first been made available via :meth:`.configure`.
+
+        """
+        return self.get_context().impl.transactional_ddl
+
+    def requires_connection(self) -> bool:
+        return not self.is_offline_mode()
+
+    def get_head_revision(self) -> _RevNumber:
+        """Return the hex identifier of the 'head' script revision.
+
+        If the script directory has multiple heads, this
+        method raises a :class:`.CommandError`;
+        :meth:`.EnvironmentContext.get_head_revisions` should be preferred.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        .. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
+
+        """
+        return self.script.as_revision_number("head")
+
+    def get_head_revisions(self) -> _RevNumber:
+        """Return the hex identifier of the 'heads' script revision(s).
+
+        This returns a tuple containing the version number of all
+        heads in the script directory.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        """
+        return self.script.as_revision_number("heads")
+
+    def get_starting_revision_argument(self) -> _RevNumber:
+        """Return the 'starting revision' argument,
+        if the revision was passed using ``start:end``.
+
+        This is only meaningful in "offline" mode.
+        Returns ``None`` if no value is available
+        or was configured.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        """
+        if self._migration_context is not None:
+            return self.script.as_revision_number(
+                self.get_context()._start_from_rev
+            )
+        elif "starting_rev" in self.context_opts:
+            return self.script.as_revision_number(
+                self.context_opts["starting_rev"]
+            )
+        else:
+            # this should raise only in the case that a command
+            # is being run where the "starting rev" is never applicable;
+            # this is to catch scripts which rely upon this in
+            # non-sql mode or similar
+            raise util.CommandError(
+                "No starting revision argument is available."
+            )
+
+    def get_revision_argument(self) -> _RevNumber:
+        """Get the 'destination' revision argument.
+
+        This is typically the argument passed to the
+        ``upgrade`` or ``downgrade`` command.
+
+        If it was specified as ``head``, the actual
+        version number is returned; if specified
+        as ``base``, ``None`` is returned.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        """
+        return self.script.as_revision_number(
+            self.context_opts["destination_rev"]
+        )
+
+    def get_tag_argument(self) -> Optional[str]:
+        """Return the value passed for the ``--tag`` argument, if any.
+
+        The ``--tag`` argument is not used directly by Alembic,
+        but is available for custom ``env.py`` configurations that
+        wish to use it; particularly for offline generation scripts
+        that wish to generate tagged filenames.
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        .. seealso::
+
+            :meth:`.EnvironmentContext.get_x_argument` - a newer and more
+            open ended system of extending ``env.py`` scripts via the command
+            line.
+
+        """
+        return self.context_opts.get("tag", None)  # type: ignore[no-any-return]  # noqa: E501
+
+    @overload
+    def get_x_argument(self, as_dictionary: Literal[False]) -> List[str]:
+        ...
+
+    @overload
+    def get_x_argument(self, as_dictionary: Literal[True]) -> Dict[str, str]:
+        ...
+
+    @overload
+    def get_x_argument(
+        self, as_dictionary: bool = ...
+    ) -> Union[List[str], Dict[str, str]]:
+        ...
+
+    def get_x_argument(
+        self, as_dictionary: bool = False
+    ) -> Union[List[str], Dict[str, str]]:
+        """Return the value(s) passed for the ``-x`` argument, if any.
+
+        The ``-x`` argument is an open ended flag that allows any user-defined
+        value or values to be passed on the command line, then available
+        here for consumption by a custom ``env.py`` script.
+
+        The return value is a list, returned directly from the ``argparse``
+        structure.  If ``as_dictionary=True`` is passed, the ``x`` arguments
+        are parsed using ``key=value`` format into a dictionary that is
+        then returned. If there is no ``=`` in an argument, its value is an
+        empty string.
+
+        .. versionchanged:: 1.13.1 Support ``as_dictionary=True`` when
+           arguments are passed without the ``=`` symbol.
+
+        For example, to support passing a database URL on the command line,
+        the standard ``env.py`` script can be modified like this::
+
+            cmd_line_url = context.get_x_argument(
+                as_dictionary=True).get('dbname')
+            if cmd_line_url:
+                engine = create_engine(cmd_line_url)
+            else:
+                engine = engine_from_config(
+                        config.get_section(config.config_ini_section),
+                        prefix='sqlalchemy.',
+                        poolclass=pool.NullPool)
+
+        This then takes effect by running the ``alembic`` script as::
+
+            alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
+
+        This function does not require that the :class:`.MigrationContext`
+        has been configured.
+
+        .. seealso::
+
+            :meth:`.EnvironmentContext.get_tag_argument`
+
+            :attr:`.Config.cmd_opts`
+
+        """
+        if self.config.cmd_opts is not None:
+            value = self.config.cmd_opts.x or []
+        else:
+            value = []
+        if as_dictionary:
+            dict_value = {}
+            for arg in value:
+                x_key, _, x_value = arg.partition("=")
+                dict_value[x_key] = x_value
+            value = dict_value
+
+        return value
+
+    def configure(
+        self,
+        connection: Optional[Connection] = None,
+        url: Optional[Union[str, URL]] = None,
+        dialect_name: Optional[str] = None,
+        dialect_opts: Optional[Dict[str, Any]] = None,
+        transactional_ddl: Optional[bool] = None,
+        transaction_per_migration: bool = False,
+        output_buffer: Optional[TextIO] = None,
+        starting_rev: Optional[str] = None,
+        tag: Optional[str] = None,
+        template_args: Optional[Dict[str, Any]] = None,
+        render_as_batch: bool = False,
+        target_metadata: Union[MetaData, Sequence[MetaData], None] = None,
+        include_name: Optional[IncludeNameFn] = None,
+        include_object: Optional[IncludeObjectFn] = None,
+        include_schemas: bool = False,
+        process_revision_directives: Optional[
+            ProcessRevisionDirectiveFn
+        ] = None,
+        compare_type: Union[bool, CompareType] = True,
+        compare_server_default: Union[bool, CompareServerDefault] = False,
+        render_item: Optional[RenderItemFn] = None,
+        literal_binds: bool = False,
+        upgrade_token: str = "upgrades",
+        downgrade_token: str = "downgrades",
+        alembic_module_prefix: str = "op.",
+        sqlalchemy_module_prefix: str = "sa.",
+        user_module_prefix: Optional[str] = None,
+        on_version_apply: Optional[OnVersionApplyFn] = None,
+        **kw: Any,
+    ) -> None:
+        """Configure a :class:`.MigrationContext` within this
+        :class:`.EnvironmentContext` which will provide database
+        connectivity and other configuration to a series of
+        migration scripts.
+
+        Many methods on :class:`.EnvironmentContext` require that
+        this method has been called in order to function, as they
+        ultimately need to have database access or at least access
+        to the dialect in use.  Those which do are documented as such.
+
+        The important thing needed by :meth:`.configure` is a
+        means to determine what kind of database dialect is in use.
+        An actual connection to that database is needed only if
+        the :class:`.MigrationContext` is to be used in
+        "online" mode.
+
+        If the :meth:`.is_offline_mode` function returns ``True``,
+        then no connection is needed here.  Otherwise, the
+        ``connection`` parameter should be present as an
+        instance of :class:`sqlalchemy.engine.Connection`.
+
+        This function is typically called from the ``env.py``
+        script within a migration environment.  It can be called
+        multiple times for an invocation.  The most recent
+        :class:`~sqlalchemy.engine.Connection`
+        for which it was called is the one that will be operated upon
+        by the next call to :meth:`.run_migrations`.
+
+        General parameters:
+
+        :param connection: a :class:`~sqlalchemy.engine.Connection`
+         to use
+         for SQL execution in "online" mode.  When present, is also
+         used to determine the type of dialect in use.
+        :param url: a string database url, or a
+         :class:`sqlalchemy.engine.url.URL` object.
+         The type of dialect to be used will be derived from this if
+         ``connection`` is not passed.
+        :param dialect_name: string name of a dialect, such as
+         "postgresql", "mssql", etc.
+         The type of dialect to be used will be derived from this if
+         ``connection`` and ``url`` are not passed.
+        :param dialect_opts: dictionary of options to be passed to dialect
+         constructor.
+        :param transactional_ddl: Force the usage of "transactional"
+         DDL on or off;
+         this otherwise defaults to whether or not the dialect in
+         use supports it.
+        :param transaction_per_migration: if True, run each migration script
+         in its own transaction, rather than running the full series of
+         migrations in a single transaction.
+        :param output_buffer: a file-like object that will be used
+         for textual output
+         when the ``--sql`` option is used to generate SQL scripts.
+         Defaults to
+         ``sys.stdout`` if not passed here and also not present on
+         the :class:`.Config`
+         object.  The value here overrides that of the :class:`.Config`
+         object.
+        :param output_encoding: when using ``--sql`` to generate SQL
+         scripts, apply this encoding to the string output.
+        :param literal_binds: when using ``--sql`` to generate SQL
+         scripts, pass through the ``literal_binds`` flag to the compiler
+         so that any literal values that would ordinarily be bound
+         parameters are converted to plain strings.
+
+         .. warning:: Dialects can typically only handle simple datatypes
+            like strings and numbers for auto-literal generation.  Datatypes
+            like dates, intervals, and others may still require manual
+            formatting, typically using :meth:`.Operations.inline_literal`.
+
+         .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy
+            versions prior to 0.8 where this feature is not supported.
+
+         .. seealso::
+
+            :meth:`.Operations.inline_literal`
+
+        :param starting_rev: Override the "starting revision" argument
+         when using ``--sql`` mode.
+        :param tag: a string tag for usage by custom ``env.py`` scripts.
+         Set via the ``--tag`` option, can be overridden here.
+        :param template_args: dictionary of template arguments which
+         will be added to the template argument environment when
+         running the "revision" command.   Note that the script environment
+         is only run within the "revision" command if the ``--autogenerate``
+         option is used, or if the option ``revision_environment=true``
+         is present in the ``alembic.ini`` file.
+
+        :param version_table: The name of the Alembic version table.
+         The default is ``'alembic_version'``.
+        :param version_table_schema: Optional schema to place version
+         table within.
+        :param version_table_pk: boolean, whether the Alembic version table
+         should use a primary key constraint for the ``version_num`` column;
+         this only takes effect when the table is first created.
+         Defaults to True; setting to False should not be necessary and is
+         here for backwards compatibility reasons.
+        :param on_version_apply: a callable or collection of callables to be
+            run for each migration step.
+            The callables will be run in the order they are given, once for
+            each migration step, after the respective operation has been
+            applied but before its transaction is finalized.
+            Each callable accepts no positional arguments and the following
+            keyword arguments:
+
+            * ``ctx``: the :class:`.MigrationContext` running the migration,
+            * ``step``: a :class:`.MigrationInfo` representing the
+              step currently being applied,
+            * ``heads``: a collection of version strings representing the
+              current heads,
+            * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`.
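+
+            As a minimal sketch, a hook that logs each step as it is
+            applied (the logger name is illustrative) might look like::
+
+                import logging
+
+                audit_log = logging.getLogger("alembic.audit")
+
+                def log_version_apply(*, ctx, step, heads, run_args):
+                    # invoked once per migration step, after the operation
+                    # is applied but before its transaction is finalized
+                    audit_log.info("applied %s; heads now %s", step, heads)
+
+                context.configure(
+                    # ...
+                    on_version_apply=[log_version_apply],
+                )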
+
+        Parameters specific to the autogenerate feature, when
+        ``alembic revision`` is run with the ``--autogenerate`` feature:
+
+        :param target_metadata: a :class:`sqlalchemy.schema.MetaData`
+         object, or a sequence of :class:`~sqlalchemy.schema.MetaData`
+         objects, that will be consulted during autogeneration.
+         The tables present in each :class:`~sqlalchemy.schema.MetaData`
+         will be compared against
+         what is locally available on the target
+         :class:`~sqlalchemy.engine.Connection`
+         to produce candidate upgrade/downgrade operations.
+        :param compare_type: Indicates type comparison behavior during
+         an autogenerate
+         operation.  Defaults to ``True`` turning on type comparison, which
+         has good accuracy on most backends.   See :ref:`compare_types`
+         for an example as well as information on other type
+         comparison options. Set to ``False`` to disable type
+         comparison. A callable can also be passed to provide custom type
+         comparison, see :ref:`compare_types` for additional details.
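+
+         As a sketch, a custom comparison callable takes the same general
+         form as the server default hook shown below; return ``None`` to
+         fall through to the default comparison::
+
+            def my_compare_type(context, inspected_column,
+                        metadata_column, inspected_type, metadata_type):
+                # return True if the types are different,
+                # False if not, or None to allow the default implementation
+                # to compare these types
+                return None
+
+            context.configure(
+                # ...
+                compare_type = my_compare_type
+            )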
+
+         .. versionchanged:: 1.12.0 The default value of
+            :paramref:`.EnvironmentContext.configure.compare_type` has been
+            changed to ``True``.
+
+         .. seealso::
+
+            :ref:`compare_types`
+
+            :paramref:`.EnvironmentContext.configure.compare_server_default`
+
+        :param compare_server_default: Indicates server default comparison
+         behavior during an autogenerate operation.  Defaults to ``False``
+         which disables server default comparison.  Set to ``True`` to turn
+         on server default comparison, which has varied accuracy depending
+         on backend.
+
+         To customize server default comparison behavior, a callable may
+         be specified which can filter server default comparisons during
+         an autogenerate operation.  The format of this callable is::
+
+            def my_compare_server_default(context, inspected_column,
+                        metadata_column, inspected_default, metadata_default,
+                        rendered_metadata_default):
+                # return True if the defaults are different,
+                # False if not, or None to allow the default implementation
+                # to compare these defaults
+                return None
+
+            context.configure(
+                # ...
+                compare_server_default = my_compare_server_default
+            )
+
+         ``inspected_column`` is a dictionary structure as returned by
+         :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
+         ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
+         the local model environment.
+
+         A return value of ``None`` indicates to allow default server
+         default comparison to proceed.  Note that some backends such as
+         PostgreSQL actually execute the two defaults on the database side
+         to compare for equivalence.
+
+         .. seealso::
+
+            :paramref:`.EnvironmentContext.configure.compare_type`
+
+        :param include_name: A callable function which is given
+         the chance to return ``True`` or ``False`` for any database reflected
+         object based on its name, including database schema names when
+         the :paramref:`.EnvironmentContext.configure.include_schemas` flag
+         is set to ``True``.
+
+         The function accepts the following positional arguments:
+
+         * ``name``: the name of the object, such as schema name or table name.
+           Will be ``None`` when indicating the default schema name of the
+           database connection.
+         * ``type``: a string describing the type of object; currently
+           ``"schema"``, ``"table"``, ``"column"``, ``"index"``,
+           ``"unique_constraint"``, or ``"foreign_key_constraint"``
+         * ``parent_names``: a dictionary of "parent" object names, that are
+           relative to the name being given.  Keys in this dictionary may
+           include:  ``"schema_name"``, ``"table_name"`` or
+           ``"schema_qualified_table_name"``.
+
+         E.g.::
+
+            def include_name(name, type_, parent_names):
+                if type_ == "schema":
+                    return name in ["schema_one", "schema_two"]
+                else:
+                    return True
+
+            context.configure(
+                # ...
+                include_schemas = True,
+                include_name = include_name
+            )
+
+         .. seealso::
+
+            :ref:`autogenerate_include_hooks`
+
+            :paramref:`.EnvironmentContext.configure.include_object`
+
+            :paramref:`.EnvironmentContext.configure.include_schemas`
+
+        :param include_object: A callable function which is given
+         the chance to return ``True`` or ``False`` for any object,
+         indicating if the given object should be considered in the
+         autogenerate sweep.
+
+         The function accepts the following positional arguments:
+
+         * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
+           as a :class:`~sqlalchemy.schema.Table`,
+           :class:`~sqlalchemy.schema.Column`,
+           :class:`~sqlalchemy.schema.Index`
+           :class:`~sqlalchemy.schema.UniqueConstraint`,
+           or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
+         * ``name``: the name of the object. This is typically available
+           via ``object.name``.
+         * ``type``: a string describing the type of object; currently
+           ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
+           or ``"foreign_key_constraint"``
+         * ``reflected``: ``True`` if the given object was produced based on
+           table reflection, ``False`` if it's from a local :class:`.MetaData`
+           object.
+         * ``compare_to``: the object being compared against, if available,
+           else ``None``.
+
+         E.g.::
+
+            def include_object(object, name, type_, reflected, compare_to):
+                if (type_ == "column" and
+                    not reflected and
+                    object.info.get("skip_autogenerate", False)):
+                    return False
+                else:
+                    return True
+
+            context.configure(
+                # ...
+                include_object = include_object
+            )
+
+         For the use case of omitting specific schemas from a target database
+         when :paramref:`.EnvironmentContext.configure.include_schemas` is
+         set to ``True``, the :attr:`~sqlalchemy.schema.Table.schema`
+         attribute can be checked for each :class:`~sqlalchemy.schema.Table`
+         object passed to the hook; however, it is much more efficient
+         to filter on schemas before reflection of objects takes place
+         using the :paramref:`.EnvironmentContext.configure.include_name`
+         hook.
+
+         .. seealso::
+
+            :ref:`autogenerate_include_hooks`
+
+            :paramref:`.EnvironmentContext.configure.include_name`
+
+            :paramref:`.EnvironmentContext.configure.include_schemas`
+
+        :param render_as_batch: if True, commands which alter elements
+         within a table will be placed under a ``with batch_alter_table():``
+         directive, so that batch migrations will take place.
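+
+         As a sketch, an autogenerated operation might then render as
+         (table and column names are illustrative)::
+
+            with op.batch_alter_table("account") as batch_op:
+                batch_op.add_column(sa.Column("last_name", sa.String(50)))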
+
+         .. seealso::
+
+            :ref:`batch_migrations`
+
+        :param include_schemas: If True, autogenerate will scan across
+         all schemas located by the SQLAlchemy
+         :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
+         method, and include all differences in tables found across all
+         those schemas.  When using this option, you may want to also
+         use the :paramref:`.EnvironmentContext.configure.include_name`
+         parameter to specify a callable which
+         can filter the tables/schemas that get included.
+
+         .. seealso::
+
+            :ref:`autogenerate_include_hooks`
+
+            :paramref:`.EnvironmentContext.configure.include_name`
+
+            :paramref:`.EnvironmentContext.configure.include_object`
+
+        :param render_item: Callable that can be used to override how
+         any schema item, i.e. column, constraint, type,
+         etc., is rendered for autogenerate.  The callable receives a
+         string describing the type of object, the object, and
+         the autogen context.  If it returns False, the
+         default rendering method will be used.  If it returns None,
+         the item will not be rendered in the context of a Table
+         construct; that is, it can be used to skip columns or constraints
+         within op.create_table()::
+
+            def my_render_column(type_, col, autogen_context):
+                if type_ == "column" and isinstance(col, MySpecialCol):
+                    return repr(col)
+                else:
+                    return False
+
+            context.configure(
+                # ...
+                render_item = my_render_column
+            )
+
+         Available values for the type string include: ``"column"``,
+         ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
+         ``"type"``, ``"server_default"``.
+
+         .. seealso::
+
+            :ref:`autogen_render_types`
+
+        :param upgrade_token: When autogenerate completes, the text of the
+         candidate upgrade operations will be present in this template
+         variable when ``script.py.mako`` is rendered.  Defaults to
+         ``upgrades``.
+        :param downgrade_token: When autogenerate completes, the text of the
+         candidate downgrade operations will be present in this
+         template variable when ``script.py.mako`` is rendered.  Defaults to
+         ``downgrades``.
+
+        :param alembic_module_prefix: When autogenerate refers to Alembic
+         :mod:`alembic.operations` constructs, this prefix will be used
+         (i.e. ``op.create_table``).  Defaults to "``op.``".
+         Can be ``None`` to indicate no prefix.
+
+        :param sqlalchemy_module_prefix: When autogenerate refers to
+         SQLAlchemy :class:`~sqlalchemy.schema.Column` or type classes,
+         this prefix will be used
+         (i.e. ``sa.Column("somename", sa.Integer)``).  Defaults to
+         "``sa.``".  Can be ``None`` to indicate no prefix.
+         Note that when dialect-specific types are rendered, autogenerate
+         will render them using the dialect module name, i.e. ``mssql.BIT()``,
+         ``postgresql.UUID()``.
+
+        :param user_module_prefix: When autogenerate refers to a SQLAlchemy
+         type (e.g. :class:`.TypeEngine`) where the module name is not
+         under the ``sqlalchemy`` namespace, this prefix will be used
+         within autogenerate.  If left at its default of
+         ``None``, the ``__module__`` attribute of the type is used to
+         render the import module.   It's a good practice to set this
+         and to have all custom types be available from a fixed module space,
+         in order to future-proof migration files against reorganizations
+         in modules.
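+
+         As a sketch, with ``user_module_prefix="mymodule."`` a custom type
+         would render as (names are illustrative)::
+
+            sa.Column("data", mymodule.MyCustomType())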
+
+         .. seealso::
+
+            :ref:`autogen_module_prefix`
+
+        :param process_revision_directives: a callable function that will
+         be passed a structure representing the end result of an autogenerate
+         or plain "revision" operation, which can be manipulated to affect
+         how the ``alembic revision`` command ultimately outputs new
+         revision scripts.   The structure of the callable is::
+
+            def process_revision_directives(context, revision, directives):
+                pass
+
+         The ``directives`` parameter is a Python list containing
+         a single :class:`.MigrationScript` directive, which represents
+         the revision file to be generated.    This list as well as its
+         contents may be freely modified to produce any set of commands.
+         The section :ref:`customizing_revision` shows an example of
+         doing this.  The ``context`` parameter is the
+         :class:`.MigrationContext` in use,
+         and ``revision`` is a tuple of revision identifiers representing the
+         current revision of the database.
+
+         The callable is invoked at all times when the ``--autogenerate``
+         option is passed to ``alembic revision``.  If ``--autogenerate``
+         is not passed, the callable is invoked only if the
+         ``revision_environment`` variable is set to True in the Alembic
+         configuration, in which case the given ``directives`` collection
+         will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps`
+         collections for ``.upgrade_ops`` and ``.downgrade_ops``.  The
+         ``--autogenerate`` option itself can be inferred by inspecting
+         ``context.config.cmd_opts.autogenerate``.
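+
+         As a sketch, a hook that prevents an empty autogenerate revision
+         from being written might look like::
+
+            def process_revision_directives(context, revision, directives):
+                script = directives[0]
+                if script.upgrade_ops.is_empty():
+                    # emit no revision file at all
+                    directives[:] = []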
+
+         The callable function may optionally be an instance of
+         a :class:`.Rewriter` object.  This is a helper object that
+         assists in the production of autogenerate-stream rewriter functions.
+
+         .. seealso::
+
+             :ref:`customizing_revision`
+
+             :ref:`autogen_rewriter`
+
+             :paramref:`.command.revision.process_revision_directives`
+
+        Parameters specific to individual backends:
+
+        :param mssql_batch_separator: The "batch separator" which will
+         be placed between each statement when generating offline SQL Server
+         migrations.  Defaults to ``GO``.  Note this is in addition to the
+         customary semicolon ``;`` at the end of each statement; SQL Server
+         considers the "batch separator" to denote the end of an
+         individual statement execution, and cannot group certain
+         dependent operations in one step.
+        :param oracle_batch_separator: The "batch separator" which will
+         be placed between each statement when generating offline
+         Oracle migrations.  Defaults to ``/``.  Oracle doesn't add a
+         semicolon between statements like most other backends.
+
+        """
+        opts = self.context_opts
+        if transactional_ddl is not None:
+            opts["transactional_ddl"] = transactional_ddl
+        if output_buffer is not None:
+            opts["output_buffer"] = output_buffer
+        elif self.config.output_buffer is not None:
+            opts["output_buffer"] = self.config.output_buffer
+        if starting_rev:
+            opts["starting_rev"] = starting_rev
+        if tag:
+            opts["tag"] = tag
+        if template_args and "template_args" in opts:
+            opts["template_args"].update(template_args)
+        opts["transaction_per_migration"] = transaction_per_migration
+        opts["target_metadata"] = target_metadata
+        opts["include_name"] = include_name
+        opts["include_object"] = include_object
+        opts["include_schemas"] = include_schemas
+        opts["render_as_batch"] = render_as_batch
+        opts["upgrade_token"] = upgrade_token
+        opts["downgrade_token"] = downgrade_token
+        opts["sqlalchemy_module_prefix"] = sqlalchemy_module_prefix
+        opts["alembic_module_prefix"] = alembic_module_prefix
+        opts["user_module_prefix"] = user_module_prefix
+        opts["literal_binds"] = literal_binds
+        opts["process_revision_directives"] = process_revision_directives
+        opts["on_version_apply"] = util.to_tuple(on_version_apply, default=())
+
+        if render_item is not None:
+            opts["render_item"] = render_item
+        opts["compare_type"] = compare_type
+        if compare_server_default is not None:
+            opts["compare_server_default"] = compare_server_default
+        opts["script"] = self.script
+
+        opts.update(kw)
+
+        self._migration_context = MigrationContext.configure(
+            connection=connection,
+            url=url,
+            dialect_name=dialect_name,
+            environment_context=self,
+            dialect_opts=dialect_opts,
+            opts=opts,
+        )
+
+    def run_migrations(self, **kw: Any) -> None:
+        """Run migrations as determined by the current command line
+        configuration
+        as well as versioning information present (or not) in the current
+        database connection (if one is present).
+
+        The function accepts optional ``**kw`` arguments.   If these are
+        passed, they are sent directly to the ``upgrade()`` and
+        ``downgrade()``
+        functions within each target revision file.   By modifying the
+        ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
+        functions accept arguments, parameters can be passed here so that
+        contextual information, usually information to identify a particular
+        database in use, can be passed from a custom ``env.py`` script
+        to the migration functions.
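+
+        As a sketch, if ``script.py.mako`` is modified so that migration
+        functions accept an argument (``engine_name`` is illustrative)::
+
+            def upgrade(engine_name):
+                ...
+
+        then ``env.py`` can supply it via::
+
+            context.run_migrations(engine_name="engine1")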
+
+        This function requires that a :class:`.MigrationContext` has
+        first been made available via :meth:`.configure`.
+
+        """
+        assert self._migration_context is not None
+        with Operations.context(self._migration_context):
+            self.get_context().run_migrations(**kw)
+
+    def execute(
+        self,
+        sql: Union[Executable, str],
+        execution_options: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Execute the given SQL using the current change context.
+
+        The behavior of :meth:`.execute` is the same
+        as that of :meth:`.Operations.execute`.  Please see that
+        function's documentation for full detail including
+        caveats and limitations.
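+
+        e.g., from within ``env.py`` (the statement is illustrative)::
+
+            context.execute("UPDATE my_special_table SET data = 'updated'")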
+
+        This function requires that a :class:`.MigrationContext` has
+        first been made available via :meth:`.configure`.
+
+        """
+        self.get_context().execute(sql, execution_options=execution_options)
+
+    def static_output(self, text: str) -> None:
+        """Emit text directly to the "offline" SQL stream.
+
+        Typically this is for emitting comments that
+        start with --.  The statement is not treated
+        as a SQL execution, no ; or batch separator
+        is added, etc.
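+
+        e.g.::
+
+            context.static_output("-- a custom comment in the SQL output")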
+
+        """
+        self.get_context().impl.static_output(text)
+
+    def begin_transaction(
+        self,
+    ) -> Union[_ProxyTransaction, ContextManager[None]]:
+        """Return a context manager that will
+        enclose an operation within a "transaction",
+        as defined by the environment's offline
+        and transactional DDL settings.
+
+        e.g.::
+
+            with context.begin_transaction():
+                context.run_migrations()
+
+        :meth:`.begin_transaction` is intended to
+        "do the right thing" regardless of
+        calling context:
+
+        * If :meth:`.is_transactional_ddl` is ``False``,
+          returns a "do nothing" context manager
+          which otherwise produces no transactional
+          state or directives.
+        * If :meth:`.is_offline_mode` is ``True``,
+          returns a context manager that will
+          invoke the :meth:`.DefaultImpl.emit_begin`
+          and :meth:`.DefaultImpl.emit_commit`
+          methods, which will produce the string
+          directives ``BEGIN`` and ``COMMIT`` on
+          the output stream, as rendered by the
+          target backend (e.g. SQL Server would
+          emit ``BEGIN TRANSACTION``).
+        * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
+          on the current online connection, which
+          returns a :class:`sqlalchemy.engine.Transaction`
+          object.  This object demarcates a real
+          transaction and is itself a context manager,
+          which will roll back if an exception
+          is raised.
+
+        Note that a custom ``env.py`` script which
+        has more specific transactional needs can of course
+        manipulate the :class:`~sqlalchemy.engine.Connection`
+        directly to produce transactional state in "online"
+        mode.
+
+        """
+
+        return self.get_context().begin_transaction()
+
+    def get_context(self) -> MigrationContext:
+        """Return the current :class:`.MigrationContext` object.
+
+        If :meth:`.EnvironmentContext.configure` has not been
+        called yet, raises an exception.
+
+        """
+
+        if self._migration_context is None:
+            raise Exception("No context has been configured yet.")
+        return self._migration_context
+
+    def get_bind(self) -> Connection:
+        """Return the current 'bind'.
+
+        In "online" mode, this is the
+        :class:`sqlalchemy.engine.Connection` currently being used
+        to emit SQL to the database.
+
+        This function requires that a :class:`.MigrationContext`
+        has first been made available via :meth:`.configure`.
+
+        """
+        return self.get_context().bind  # type: ignore[return-value]
+
+    def get_impl(self) -> DefaultImpl:
+        return self.get_context().impl
diff --git a/venv/Lib/site-packages/alembic/runtime/migration.py b/venv/Lib/site-packages/alembic/runtime/migration.py
new file mode 100644
index 0000000000000000000000000000000000000000..95c69bc692555e8614570691b0309b866bac2946
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/runtime/migration.py
@@ -0,0 +1,1396 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+from contextlib import contextmanager
+from contextlib import nullcontext
+import logging
+import sys
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Collection
+from typing import ContextManager
+from typing import Dict
+from typing import Iterable
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from sqlalchemy import Column
+from sqlalchemy import literal_column
+from sqlalchemy import MetaData
+from sqlalchemy import PrimaryKeyConstraint
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy.engine import Engine
+from sqlalchemy.engine import url as sqla_url
+from sqlalchemy.engine.strategies import MockEngineStrategy
+
+from .. import ddl
+from .. import util
+from ..util import sqla_compat
+from ..util.compat import EncodedIO
+
+if TYPE_CHECKING:
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.engine import URL
+    from sqlalchemy.engine.base import Connection
+    from sqlalchemy.engine.base import Transaction
+    from sqlalchemy.engine.mock import MockConnection
+    from sqlalchemy.sql import Executable
+
+    from .environment import EnvironmentContext
+    from ..config import Config
+    from ..script.base import Script
+    from ..script.base import ScriptDirectory
+    from ..script.revision import _RevisionOrBase
+    from ..script.revision import Revision
+    from ..script.revision import RevisionMap
+
+log = logging.getLogger(__name__)
+
+
+class _ProxyTransaction:
+    def __init__(self, migration_context: MigrationContext) -> None:
+        self.migration_context = migration_context
+
+    @property
+    def _proxied_transaction(self) -> Optional[Transaction]:
+        return self.migration_context._transaction
+
+    def rollback(self) -> None:
+        t = self._proxied_transaction
+        assert t is not None
+        t.rollback()
+        self.migration_context._transaction = None
+
+    def commit(self) -> None:
+        t = self._proxied_transaction
+        assert t is not None
+        t.commit()
+        self.migration_context._transaction = None
+
+    def __enter__(self) -> _ProxyTransaction:
+        return self
+
+    def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
+        if self._proxied_transaction is not None:
+            self._proxied_transaction.__exit__(type_, value, traceback)
+            self.migration_context._transaction = None
+
+
+class MigrationContext:
+
+    """Represent the database state made available to a migration
+    script.
+
+    :class:`.MigrationContext` is the front end to an actual
+    database connection, or alternatively a string output
+    stream given a particular database dialect,
+    from an Alembic perspective.
+
+    When inside the ``env.py`` script, the :class:`.MigrationContext`
+    is available via the
+    :meth:`.EnvironmentContext.get_context` method,
+    which is available at ``alembic.context``::
+
+        # from within env.py script
+        from alembic import context
+
+        migration_context = context.get_context()
+
+    For usage outside of an ``env.py`` script, such as for
+    utility routines that want to check the current version
+    in the database, the :meth:`.MigrationContext.configure`
+    method may be used to create new :class:`.MigrationContext` objects.
+    For example, to get at the current revision in the
+    database using :meth:`.MigrationContext.get_current_revision`::
+
+        # in any application, outside of an env.py script
+        from alembic.migration import MigrationContext
+        from sqlalchemy import create_engine
+
+        engine = create_engine("postgresql://mydatabase")
+        conn = engine.connect()
+
+        context = MigrationContext.configure(conn)
+        current_rev = context.get_current_revision()
+
+    The above context can also be used to produce
+    Alembic migration operations with an :class:`.Operations`
+    instance::
+
+        # in any application, outside of the normal Alembic environment
+        from alembic.operations import Operations
+
+        op = Operations(context)
+        op.alter_column("mytable", "somecolumn", nullable=True)
+
+    """
+
+    def __init__(
+        self,
+        dialect: Dialect,
+        connection: Optional[Connection],
+        opts: Dict[str, Any],
+        environment_context: Optional[EnvironmentContext] = None,
+    ) -> None:
+        self.environment_context = environment_context
+        self.opts = opts
+        self.dialect = dialect
+        self.script: Optional[ScriptDirectory] = opts.get("script")
+        as_sql: bool = opts.get("as_sql", False)
+        transactional_ddl = opts.get("transactional_ddl")
+        self._transaction_per_migration = opts.get(
+            "transaction_per_migration", False
+        )
+        self.on_version_apply_callbacks = opts.get("on_version_apply", ())
+        self._transaction: Optional[Transaction] = None
+
+        if as_sql:
+            self.connection = cast(
+                Optional["Connection"], self._stdout_connection(connection)
+            )
+            assert self.connection is not None
+            self._in_external_transaction = False
+        else:
+            self.connection = connection
+            self._in_external_transaction = (
+                sqla_compat._get_connection_in_transaction(connection)
+            )
+
+        self._migrations_fn: Optional[
+            Callable[..., Iterable[RevisionStep]]
+        ] = opts.get("fn")
+        self.as_sql = as_sql
+
+        self.purge = opts.get("purge", False)
+
+        if "output_encoding" in opts:
+            self.output_buffer = EncodedIO(
+                opts.get("output_buffer")
+                or sys.stdout,  # type:ignore[arg-type]
+                opts["output_encoding"],
+            )
+        else:
+            self.output_buffer = opts.get("output_buffer", sys.stdout)
+
+        self._user_compare_type = opts.get("compare_type", True)
+        self._user_compare_server_default = opts.get(
+            "compare_server_default", False
+        )
+        self.version_table = version_table = opts.get(
+            "version_table", "alembic_version"
+        )
+        self.version_table_schema = version_table_schema = opts.get(
+            "version_table_schema", None
+        )
+        self._version = Table(
+            version_table,
+            MetaData(),
+            Column("version_num", String(32), nullable=False),
+            schema=version_table_schema,
+        )
+        if opts.get("version_table_pk", True):
+            self._version.append_constraint(
+                PrimaryKeyConstraint(
+                    "version_num", name="%s_pkc" % version_table
+                )
+            )
+
+        self._start_from_rev: Optional[str] = opts.get("starting_rev")
+        self.impl = ddl.DefaultImpl.get_by_dialect(dialect)(
+            dialect,
+            self.connection,
+            self.as_sql,
+            transactional_ddl,
+            self.output_buffer,
+            opts,
+        )
+        log.info("Context impl %s.", self.impl.__class__.__name__)
+        if self.as_sql:
+            log.info("Generating static SQL")
+        log.info(
+            "Will assume %s DDL.",
+            "transactional"
+            if self.impl.transactional_ddl
+            else "non-transactional",
+        )
+
+    @classmethod
+    def configure(
+        cls,
+        connection: Optional[Connection] = None,
+        url: Optional[Union[str, URL]] = None,
+        dialect_name: Optional[str] = None,
+        dialect: Optional[Dialect] = None,
+        environment_context: Optional[EnvironmentContext] = None,
+        dialect_opts: Optional[Dict[str, str]] = None,
+        opts: Optional[Any] = None,
+    ) -> MigrationContext:
+        """Create a new :class:`.MigrationContext`.
+
+        This is a factory method usually called
+        by :meth:`.EnvironmentContext.configure`.
+
+        :param connection: a :class:`~sqlalchemy.engine.Connection`
+         to use for SQL execution in "online" mode.  When present,
+         it is also used to determine the type of dialect in use.
+        :param url: a string database url, or a
+         :class:`sqlalchemy.engine.url.URL` object.
+         The type of dialect to be used will be derived from this if
+         ``connection`` is not passed.
+        :param dialect_name: string name of a dialect, such as
+         "postgresql", "mssql", etc.  The type of dialect to be used will be
+         derived from this if ``connection`` and ``url`` are not passed.
+        :param opts: dictionary of options.  Most other options
+         accepted by :meth:`.EnvironmentContext.configure` are passed via
+         this dictionary.
+
+        """
+        if opts is None:
+            opts = {}
+        if dialect_opts is None:
+            dialect_opts = {}
+
+        if connection:
+            if isinstance(connection, Engine):
+                raise util.CommandError(
+                    "'connection' argument to configure() is expected "
+                    "to be a sqlalchemy.engine.Connection instance, "
+                    "got %r" % connection,
+                )
+
+            dialect = connection.dialect
+        elif url:
+            url_obj = sqla_url.make_url(url)
+            dialect = url_obj.get_dialect()(**dialect_opts)
+        elif dialect_name:
+            url_obj = sqla_url.make_url("%s://" % dialect_name)
+            dialect = url_obj.get_dialect()(**dialect_opts)
+        elif not dialect:
+            raise Exception("Connection, url, or dialect_name is required.")
+        assert dialect is not None
+        return MigrationContext(dialect, connection, opts, environment_context)
+
+    @contextmanager
+    def autocommit_block(self) -> Iterator[None]:
+        """Enter an "autocommit" block, for databases that support AUTOCOMMIT
+        isolation levels.
+
+        This special directive is intended to support the occasional database
+        DDL or system operation that specifically has to be run outside of
+        any kind of transaction block.   The PostgreSQL database platform
+        is the most common target for this style of operation, as many
+        of its DDL operations must be run outside of transaction blocks, even
+        though the database overall supports transactional DDL.
+
+        The method is used as a context manager within a migration script, by
+        calling on :meth:`.Operations.get_context` to retrieve the
+        :class:`.MigrationContext`, then invoking
+        :meth:`.MigrationContext.autocommit_block` using the ``with:``
+        statement::
+
+            def upgrade():
+                with op.get_context().autocommit_block():
+                    op.execute("ALTER TYPE mood ADD VALUE 'soso'")
+
+        Above, a PostgreSQL "ALTER TYPE..ADD VALUE" directive is emitted,
+        which must be run outside of a transaction block at the database level.
+        The :meth:`.MigrationContext.autocommit_block` method makes use of the
+        SQLAlchemy ``AUTOCOMMIT`` isolation level setting, which against the
+        psycopg2 DBAPI corresponds to the ``connection.autocommit`` setting,
+        to ensure that the database driver is not inside of a DBAPI level
+        transaction block.
+
+        .. warning::
+
+            As is necessary, **the database transaction preceding the block is
+            unconditionally committed**.  This means that the run of migrations
+            preceding the operation will be committed, before the overall
+            migration operation is complete.
+
+            It is recommended that when an application includes migrations
+            with "autocommit" blocks,
+            :paramref:`.EnvironmentContext.configure.transaction_per_migration`
+            be used so that the calling environment is tuned to expect short
+            per-file migrations whether or not one of them has an autocommit
+            block.
+
+
+        """
+        _in_connection_transaction = self._in_connection_transaction()
+
+        if self.impl.transactional_ddl and self.as_sql:
+            self.impl.emit_commit()
+
+        elif _in_connection_transaction:
+            assert self._transaction is not None
+
+            self._transaction.commit()
+            self._transaction = None
+
+        if not self.as_sql:
+            assert self.connection is not None
+            current_level = self.connection.get_isolation_level()
+            base_connection = self.connection
+
+            # in 1.3 and 1.4 non-future mode, the connection gets switched
+            # out.  we can use the base connection with the new mode
+            # except that it will not know it's in "autocommit" and will
+            # emit deprecation warnings when an autocommit action takes
+            # place.
+            self.connection = (
+                self.impl.connection
+            ) = base_connection.execution_options(isolation_level="AUTOCOMMIT")
+
+            # sqlalchemy future mode will "autobegin" in any case, so take
+            # control of that "transaction" here
+            fake_trans: Optional[Transaction] = self.connection.begin()
+        else:
+            fake_trans = None
+        try:
+            yield
+        finally:
+            if not self.as_sql:
+                assert self.connection is not None
+                if fake_trans is not None:
+                    fake_trans.commit()
+                self.connection.execution_options(
+                    isolation_level=current_level
+                )
+                self.connection = self.impl.connection = base_connection
+
+            if self.impl.transactional_ddl and self.as_sql:
+                self.impl.emit_begin()
+
+            elif _in_connection_transaction:
+                assert self.connection is not None
+                self._transaction = self.connection.begin()
+
+    def begin_transaction(
+        self, _per_migration: bool = False
+    ) -> Union[_ProxyTransaction, ContextManager[None]]:
+        """Begin a logical transaction for migration operations.
+
+        This method is used within an ``env.py`` script to demarcate where
+        the outer "transaction" for a series of migrations begins.  Example::
+
+            def run_migrations_online():
+                connectable = create_engine(...)
+
+                with connectable.connect() as connection:
+                    context.configure(
+                        connection=connection, target_metadata=target_metadata
+                    )
+
+                    with context.begin_transaction():
+                        context.run_migrations()
+
+        Above, :meth:`.MigrationContext.begin_transaction` is used to demarcate
+        where the outer logical transaction occurs around the
+        :meth:`.MigrationContext.run_migrations` operation.
+
+        A "Logical" transaction means that the operation may or may not
+        correspond to a real database transaction.   If the target database
+        supports transactional DDL (or
+        :paramref:`.EnvironmentContext.configure.transactional_ddl` is true),
+        the :paramref:`.EnvironmentContext.configure.transaction_per_migration`
+        flag is not set, and the migration is against a real database
+        connection (as opposed to using "offline" ``--sql`` mode), a real
+        transaction will be started.   If ``--sql`` mode is in effect, the
+        operation would instead correspond to a string such as "BEGIN" being
+        emitted to the string output.
+
+        The returned object is a Python context manager that should only be
+        used in the context of a ``with:`` statement as indicated above.
+        The object has no other guaranteed API features present.
+
+        .. seealso::
+
+            :meth:`.MigrationContext.autocommit_block`
+
+        """
+
+        if self._in_external_transaction:
+            return nullcontext()
+
+        if self.impl.transactional_ddl:
+            transaction_now = _per_migration == self._transaction_per_migration
+        else:
+            transaction_now = _per_migration is True
+
+        if not transaction_now:
+            return nullcontext()
+
+        elif not self.impl.transactional_ddl:
+            assert _per_migration
+
+            if self.as_sql:
+                return nullcontext()
+            else:
+                # track our own notion of a "transaction block", which must be
+                # committed when complete.   Don't rely upon whether or not the
+                # SQLAlchemy connection reports as "in transaction"; this
+                # because SQLAlchemy future connection features autobegin
+                # behavior, so it may already be in a transaction from our
+                # emitting of queries like "has_version_table", etc. While we
+                # could track these operations as well, that leaves open the
+                # possibility of new operations or other things happening in
+                # the user environment that still may be triggering
+                # "autobegin".
+
+                in_transaction = self._transaction is not None
+
+                if in_transaction:
+                    return nullcontext()
+                else:
+                    assert self.connection is not None
+                    self._transaction = (
+                        sqla_compat._safe_begin_connection_transaction(
+                            self.connection
+                        )
+                    )
+                    return _ProxyTransaction(self)
+        elif self.as_sql:
+
+            @contextmanager
+            def begin_commit():
+                self.impl.emit_begin()
+                yield
+                self.impl.emit_commit()
+
+            return begin_commit()
+        else:
+            assert self.connection is not None
+            self._transaction = sqla_compat._safe_begin_connection_transaction(
+                self.connection
+            )
+            return _ProxyTransaction(self)
+
+    def get_current_revision(self) -> Optional[str]:
+        """Return the current revision, usually that which is present
+        in the ``alembic_version`` table in the database.
+
+        This method is intended to be used only for a migration stream that
+        does not contain unmerged branches in the target database;
+        if there are multiple branches present, an exception is raised.
+        The :meth:`.MigrationContext.get_current_heads` should be preferred
+        over this method going forward in order to be compatible with
+        branch migration support.
+
+        If this :class:`.MigrationContext` was configured in "offline"
+        mode, that is with ``as_sql=True``, the ``starting_rev``
+        parameter is returned instead, if any.
+
+        """
+        heads = self.get_current_heads()
+        if len(heads) == 0:
+            return None
+        elif len(heads) > 1:
+            raise util.CommandError(
+                "Version table '%s' has more than one head present; "
+                "please use get_current_heads()" % self.version_table
+            )
+        else:
+            return heads[0]
+
+    def get_current_heads(self) -> Tuple[str, ...]:
+        """Return a tuple of the current 'head versions' that are represented
+        in the target database.
+
+        For a migration stream without branches, this will be a single
+        value, synonymous with that of
+        :meth:`.MigrationContext.get_current_revision`.   However when multiple
+        unmerged branches exist within the target database, the returned tuple
+        will contain a value for each head.
+
+        If this :class:`.MigrationContext` was configured in "offline"
+        mode, that is with ``as_sql=True``, the ``starting_rev``
+        parameter is returned in a one-length tuple.
+
+        If no version table is present, or if there are no revisions
+        present, an empty tuple is returned.
+
+        """
+        if self.as_sql:
+            start_from_rev: Any = self._start_from_rev
+            if start_from_rev == "base":
+                start_from_rev = None
+            elif start_from_rev is not None and self.script:
+                start_from_rev = [
+                    self.script.get_revision(sfr).revision
+                    for sfr in util.to_list(start_from_rev)
+                    if sfr not in (None, "base")
+                ]
+            return util.to_tuple(start_from_rev, default=())
+        else:
+            if self._start_from_rev:
+                raise util.CommandError(
+                    "Can't specify current_rev to context "
+                    "when using a database connection"
+                )
+            if not self._has_version_table():
+                return ()
+        assert self.connection is not None
+        return tuple(
+            row[0] for row in self.connection.execute(self._version.select())
+        )
+
+    def _ensure_version_table(self, purge: bool = False) -> None:
+        with sqla_compat._ensure_scope_for_ddl(self.connection):
+            assert self.connection is not None
+            self._version.create(self.connection, checkfirst=True)
+            if purge:
+                assert self.connection is not None
+                self.connection.execute(self._version.delete())
+
+    def _has_version_table(self) -> bool:
+        assert self.connection is not None
+        return sqla_compat._connectable_has_table(
+            self.connection, self.version_table, self.version_table_schema
+        )
+
+    def stamp(self, script_directory: ScriptDirectory, revision: str) -> None:
+        """Stamp the version table with a specific revision.
+
+        This method calculates those branches to which the given revision
+        can apply, and updates those branches as though they were migrated
+        towards that revision (either up or down).  If no current branches
+        include the revision, it is added as a new branch head.
+
+        """
+        heads = self.get_current_heads()
+        if not self.as_sql and not heads:
+            self._ensure_version_table()
+        head_maintainer = HeadMaintainer(self, heads)
+        for step in script_directory._stamp_revs(revision, heads):
+            head_maintainer.update_to_step(step)
+
+    def run_migrations(self, **kw: Any) -> None:
+        r"""Run the migration scripts established for this
+        :class:`.MigrationContext`, if any.
+
+        The commands in :mod:`alembic.command` will set up a function
+        that is ultimately passed to the :class:`.MigrationContext`
+        as the ``fn`` argument.  This function represents the "work"
+        that will be done when :meth:`.MigrationContext.run_migrations`
+        is called, typically from within the ``env.py`` script of the
+        migration environment.  The "work function" then provides an iterable
+        of version callables and other version information which
+        in the case of the ``upgrade`` or ``downgrade`` commands are the
+        list of version scripts to invoke.  Other commands yield nothing,
+        in the case that a command wants to run some other operation
+        against the database such as the ``current`` or ``stamp`` commands.
+
+        :param \**kw: keyword arguments here will be passed to each
+         migration callable, that is the ``upgrade()`` or ``downgrade()``
+         method within revision scripts.
+
+        """
+        self.impl.start_migrations()
+
+        heads: Tuple[str, ...]
+        if self.purge:
+            if self.as_sql:
+                raise util.CommandError("Can't use --purge with --sql mode")
+            self._ensure_version_table(purge=True)
+            heads = ()
+        else:
+            heads = self.get_current_heads()
+
+            dont_mutate = self.opts.get("dont_mutate", False)
+
+            if not self.as_sql and not heads and not dont_mutate:
+                self._ensure_version_table()
+
+        head_maintainer = HeadMaintainer(self, heads)
+
+        assert self._migrations_fn is not None
+        for step in self._migrations_fn(heads, self):
+            with self.begin_transaction(_per_migration=True):
+                if self.as_sql and not head_maintainer.heads:
+                    # for offline mode, include a CREATE TABLE from
+                    # the base
+                    assert self.connection is not None
+                    self._version.create(self.connection)
+                log.info("Running %s", step)
+                if self.as_sql:
+                    self.impl.static_output(
+                        "-- Running %s" % (step.short_log,)
+                    )
+                step.migration_fn(**kw)
+
+                # previously, we wouldn't stamp per migration
+                # if we were in a transaction, however given the more
+                # complex model that involves any number of inserts
+                # and row-targeted updates and deletes, it's simpler for now
+                # just to run the operations on every version
+                head_maintainer.update_to_step(step)
+                for callback in self.on_version_apply_callbacks:
+                    callback(
+                        ctx=self,
+                        step=step.info,
+                        heads=set(head_maintainer.heads),
+                        run_args=kw,
+                    )
+
+        if self.as_sql and not head_maintainer.heads:
+            assert self.connection is not None
+            self._version.drop(self.connection)
+
+    def _in_connection_transaction(self) -> bool:
+        try:
+            meth = self.connection.in_transaction  # type:ignore[union-attr]
+        except AttributeError:
+            return False
+        else:
+            return meth()
+
+    def execute(
+        self,
+        sql: Union[Executable, str],
+        execution_options: Optional[Dict[str, Any]] = None,
+    ) -> None:
+        """Execute a SQL construct or string statement.
+
+        The underlying execution mechanics are used, that is
+        if this is "offline mode" the SQL is written to the
+        output buffer, otherwise the SQL is emitted on
+        the current SQLAlchemy connection.
+
+        """
+        self.impl._exec(sql, execution_options)
+
+    def _stdout_connection(
+        self, connection: Optional[Connection]
+    ) -> MockConnection:
+        def dump(construct, *multiparams, **params):
+            self.impl._exec(construct)
+
+        return MockEngineStrategy.MockConnection(self.dialect, dump)
+
+    @property
+    def bind(self) -> Optional[Connection]:
+        """Return the current "bind".
+
+        In online mode, this is an instance of
+        :class:`sqlalchemy.engine.Connection`, and is suitable
+        for ad-hoc execution of any kind of usage described
+        in SQLAlchemy Core documentation as well as
+        for usage with the :meth:`sqlalchemy.schema.Table.create`
+        and :meth:`sqlalchemy.schema.MetaData.create_all` methods
+        of :class:`~sqlalchemy.schema.Table`,
+        :class:`~sqlalchemy.schema.MetaData`.
+
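+        e.g., with ``my_table`` as an illustrative
+        :class:`~sqlalchemy.schema.Table`::
+
+            my_table.create(migration_context.bind)
+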
+        Note that when "standard output" mode is enabled,
+        this bind will be a "mock" connection handler that cannot
+        return results and is only appropriate for a very limited
+        subset of commands.
+
+        """
+        return self.connection
+
+    @property
+    def config(self) -> Optional[Config]:
+        """Return the :class:`.Config` used by the current environment,
+        if any."""
+
+        if self.environment_context:
+            return self.environment_context.config
+        else:
+            return None
+
+    def _compare_type(
+        self, inspector_column: Column[Any], metadata_column: Column
+    ) -> bool:
+        if self._user_compare_type is False:
+            return False
+
+        if callable(self._user_compare_type):
+            user_value = self._user_compare_type(
+                self,
+                inspector_column,
+                metadata_column,
+                inspector_column.type,
+                metadata_column.type,
+            )
+            if user_value is not None:
+                return user_value
+
+        return self.impl.compare_type(inspector_column, metadata_column)
+
+    def _compare_server_default(
+        self,
+        inspector_column: Column[Any],
+        metadata_column: Column[Any],
+        rendered_metadata_default: Optional[str],
+        rendered_column_default: Optional[str],
+    ) -> bool:
+        if self._user_compare_server_default is False:
+            return False
+
+        if callable(self._user_compare_server_default):
+            user_value = self._user_compare_server_default(
+                self,
+                inspector_column,
+                metadata_column,
+                rendered_column_default,
+                metadata_column.server_default,
+                rendered_metadata_default,
+            )
+            if user_value is not None:
+                return user_value
+
+        return self.impl.compare_server_default(
+            inspector_column,
+            metadata_column,
+            rendered_metadata_default,
+            rendered_column_default,
+        )
+
+
+class HeadMaintainer:
+    def __init__(self, context: MigrationContext, heads: Any) -> None:
+        self.context = context
+        self.heads = set(heads)
+
+    def _insert_version(self, version: str) -> None:
+        assert version not in self.heads
+        self.heads.add(version)
+
+        self.context.impl._exec(
+            self.context._version.insert().values(
+                version_num=literal_column("'%s'" % version)
+            )
+        )
+
+    def _delete_version(self, version: str) -> None:
+        self.heads.remove(version)
+
+        ret = self.context.impl._exec(
+            self.context._version.delete().where(
+                self.context._version.c.version_num
+                == literal_column("'%s'" % version)
+            )
+        )
+
+        if (
+            not self.context.as_sql
+            and self.context.dialect.supports_sane_rowcount
+            and ret is not None
+            and ret.rowcount != 1
+        ):
+            raise util.CommandError(
+                "Online migration expected to match one "
+                "row when deleting '%s' in '%s'; "
+                "%d found"
+                % (version, self.context.version_table, ret.rowcount)
+            )
+
+    def _update_version(self, from_: str, to_: str) -> None:
+        assert to_ not in self.heads
+        self.heads.remove(from_)
+        self.heads.add(to_)
+
+        ret = self.context.impl._exec(
+            self.context._version.update()
+            .values(version_num=literal_column("'%s'" % to_))
+            .where(
+                self.context._version.c.version_num
+                == literal_column("'%s'" % from_)
+            )
+        )
+
+        if (
+            not self.context.as_sql
+            and self.context.dialect.supports_sane_rowcount
+            and ret is not None
+            and ret.rowcount != 1
+        ):
+            raise util.CommandError(
+                "Online migration expected to match one "
+                "row when updating '%s' to '%s' in '%s'; "
+                "%d found"
+                % (from_, to_, self.context.version_table, ret.rowcount)
+            )
+
+    def update_to_step(self, step: Union[RevisionStep, StampStep]) -> None:
+        if step.should_delete_branch(self.heads):
+            vers = step.delete_version_num
+            log.debug("branch delete %s", vers)
+            self._delete_version(vers)
+        elif step.should_create_branch(self.heads):
+            vers = step.insert_version_num
+            log.debug("new branch insert %s", vers)
+            self._insert_version(vers)
+        elif step.should_merge_branches(self.heads):
+            # delete revs, update from rev, update to rev
+            (
+                delete_revs,
+                update_from_rev,
+                update_to_rev,
+            ) = step.merge_branch_idents(self.heads)
+            log.debug(
+                "merge, delete %s, update %s to %s",
+                delete_revs,
+                update_from_rev,
+                update_to_rev,
+            )
+            for delrev in delete_revs:
+                self._delete_version(delrev)
+            self._update_version(update_from_rev, update_to_rev)
+        elif step.should_unmerge_branches(self.heads):
+            (
+                update_from_rev,
+                update_to_rev,
+                insert_revs,
+            ) = step.unmerge_branch_idents(self.heads)
+            log.debug(
+                "unmerge, insert %s, update %s to %s",
+                insert_revs,
+                update_from_rev,
+                update_to_rev,
+            )
+            for insrev in insert_revs:
+                self._insert_version(insrev)
+            self._update_version(update_from_rev, update_to_rev)
+        else:
+            from_, to_ = step.update_version_num(self.heads)
+            log.debug("update %s to %s", from_, to_)
+            self._update_version(from_, to_)
+
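+# Illustrative sketch (not part of Alembic): how update_to_step() above
+# rewrites the set of heads for a hypothetical tree in which "m" is a
+# merge revision with down revisions ("a", "b"):
+#
+#   heads = {"a", "b"}
+#   upgrade to "m"  -> should_merge_branches() is True;
+#                      merge_branch_idents() yields e.g. (["a"], "b", "m"),
+#                      so row "a" is deleted and row "b" is updated to "m"
+#   heads = {"m"}
+#   downgrade "m"   -> should_unmerge_branches() is True; rows for "a"
+#                      and "b" are restored from the single "m" row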
+
+class MigrationInfo:
+    """Exposes information about a migration step to a callback listener.
+
+    The :class:`.MigrationInfo` object is available exclusively for the
+    benefit of the :paramref:`.EnvironmentContext.on_version_apply`
+    callback hook.
+
+    """
+
+    is_upgrade: bool
+    """True/False: indicates whether this operation ascends or descends the
+    version tree."""
+
+    is_stamp: bool
+    """True/False: indicates whether this operation is a stamp (i.e. whether
+    it results in any actual database operations)."""
+
+    up_revision_id: Optional[str]
+    """Version string corresponding to :attr:`.Revision.revision`.
+
+    In the case of a stamp operation, it is advised to use the
+    :attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can
+    make a single movement from one or more branches down to a single
+    branchpoint, in which case there will be multiple "up" revisions.
+
+    .. seealso::
+
+        :attr:`.MigrationInfo.up_revision_ids`
+
+    """
+
+    up_revision_ids: Tuple[str, ...]
+    """Tuple of version strings corresponding to :attr:`.Revision.revision`.
+
+    In the majority of cases, this tuple will be a single value, synonymous
+    with the scalar value of :attr:`.MigrationInfo.up_revision_id`.
+    It can be multiple revision identifiers only in the case of an
+    ``alembic stamp`` operation moving downward from multiple
+    branches to their common branch point.
+
+    """
+
+    down_revision_ids: Tuple[str, ...]
+    """Tuple of strings representing the base revisions of this migration step.
+
+    If empty, this represents a root revision; otherwise, the first item
+    corresponds to :attr:`.Revision.down_revision`, and the rest are inferred
+    from dependencies.
+    """
+
+    revision_map: RevisionMap
+    """The revision map inside of which this operation occurs."""
+
+    def __init__(
+        self,
+        revision_map: RevisionMap,
+        is_upgrade: bool,
+        is_stamp: bool,
+        up_revisions: Union[str, Tuple[str, ...]],
+        down_revisions: Union[str, Tuple[str, ...]],
+    ) -> None:
+        self.revision_map = revision_map
+        self.is_upgrade = is_upgrade
+        self.is_stamp = is_stamp
+        self.up_revision_ids = util.to_tuple(up_revisions, default=())
+        if self.up_revision_ids:
+            self.up_revision_id = self.up_revision_ids[0]
+        else:
+            # this should never be the case with
+            # "upgrade", "downgrade", or "stamp" as we are always
+            # measuring movement in terms of at least one upgrade version
+            self.up_revision_id = None
+        self.down_revision_ids = util.to_tuple(down_revisions, default=())
+
+    @property
+    def is_migration(self) -> bool:
+        """True/False: indicates whether this operation is a migration.
+
+        At present this is true if and only if the migration is not a stamp.
+        If other operation types are added in the future, both this attribute
+        and :attr:`~.MigrationInfo.is_stamp` will be false.
+        """
+        return not self.is_stamp
+
+    @property
+    def source_revision_ids(self) -> Tuple[str, ...]:
+        """Active revisions before this migration step is applied."""
+        return (
+            self.down_revision_ids if self.is_upgrade else self.up_revision_ids
+        )
+
+    @property
+    def destination_revision_ids(self) -> Tuple[str, ...]:
+        """Active revisions after this migration step is applied."""
+        return (
+            self.up_revision_ids if self.is_upgrade else self.down_revision_ids
+        )
+
+    @property
+    def up_revision(self) -> Optional[Revision]:
+        """Get :attr:`~.MigrationInfo.up_revision_id` as
+        a :class:`.Revision`.
+
+        """
+        return self.revision_map.get_revision(self.up_revision_id)
+
+    @property
+    def up_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Get :attr:`~.MigrationInfo.up_revision_ids` as a
+        :class:`.Revision`."""
+        return self.revision_map.get_revisions(self.up_revision_ids)
+
+    @property
+    def down_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of
+        :class:`Revisions <.Revision>`."""
+        return self.revision_map.get_revisions(self.down_revision_ids)
+
+    @property
+    def source_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of
+        :class:`Revisions <.Revision>`."""
+        return self.revision_map.get_revisions(self.source_revision_ids)
+
+    @property
+    def destination_revisions(self) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of
+        :class:`Revisions <.Revision>`."""
+        return self.revision_map.get_revisions(self.destination_revision_ids)
+
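+# Illustrative sketch (not part of Alembic itself): a minimal
+# ``EnvironmentContext.on_version_apply`` hook in ``env.py`` that reads
+# the :class:`.MigrationInfo` passed as ``step``; the keyword names
+# follow the documented hook contract, and the printing is a
+# hypothetical addition.
+#
+#   def report_step(ctx, step, heads, run_args, **kw):
+#       direction = "upgrade" if step.is_upgrade else "downgrade"
+#       print(
+#           "%s %s -> %s"
+#           % (
+#               direction,
+#               ", ".join(step.source_revision_ids) or "<base>",
+#               ", ".join(step.destination_revision_ids),
+#           )
+#       )
+#
+#   context.configure(
+#       connection=connection,
+#       target_metadata=target_metadata,
+#       on_version_apply=report_step,
+#   )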
+
+class MigrationStep:
+    from_revisions_no_deps: Tuple[str, ...]
+    to_revisions_no_deps: Tuple[str, ...]
+    is_upgrade: bool
+    migration_fn: Any
+
+    if TYPE_CHECKING:
+
+        @property
+        def doc(self) -> Optional[str]:
+            ...
+
+    @property
+    def name(self) -> str:
+        return self.migration_fn.__name__
+
+    @classmethod
+    def upgrade_from_script(
+        cls, revision_map: RevisionMap, script: Script
+    ) -> RevisionStep:
+        return RevisionStep(revision_map, script, True)
+
+    @classmethod
+    def downgrade_from_script(
+        cls, revision_map: RevisionMap, script: Script
+    ) -> RevisionStep:
+        return RevisionStep(revision_map, script, False)
+
+    @property
+    def is_downgrade(self) -> bool:
+        return not self.is_upgrade
+
+    @property
+    def short_log(self) -> str:
+        return "%s %s -> %s" % (
+            self.name,
+            util.format_as_comma(self.from_revisions_no_deps),
+            util.format_as_comma(self.to_revisions_no_deps),
+        )
+
+    def __str__(self):
+        if self.doc:
+            return "%s %s -> %s, %s" % (
+                self.name,
+                util.format_as_comma(self.from_revisions_no_deps),
+                util.format_as_comma(self.to_revisions_no_deps),
+                self.doc,
+            )
+        else:
+            return self.short_log
+
+
+class RevisionStep(MigrationStep):
+    def __init__(
+        self, revision_map: RevisionMap, revision: Script, is_upgrade: bool
+    ) -> None:
+        self.revision_map = revision_map
+        self.revision = revision
+        self.is_upgrade = is_upgrade
+        if is_upgrade:
+            self.migration_fn = revision.module.upgrade
+        else:
+            self.migration_fn = revision.module.downgrade
+
+    def __repr__(self):
+        return "RevisionStep(%r, is_upgrade=%r)" % (
+            self.revision.revision,
+            self.is_upgrade,
+        )
+
+    def __eq__(self, other: object) -> bool:
+        return (
+            isinstance(other, RevisionStep)
+            and other.revision == self.revision
+            and self.is_upgrade == other.is_upgrade
+        )
+
+    @property
+    def doc(self) -> Optional[str]:
+        return self.revision.doc
+
+    @property
+    def from_revisions(self) -> Tuple[str, ...]:
+        if self.is_upgrade:
+            return self.revision._normalized_down_revisions
+        else:
+            return (self.revision.revision,)
+
+    @property
+    def from_revisions_no_deps(  # type:ignore[override]
+        self,
+    ) -> Tuple[str, ...]:
+        if self.is_upgrade:
+            return self.revision._versioned_down_revisions
+        else:
+            return (self.revision.revision,)
+
+    @property
+    def to_revisions(self) -> Tuple[str, ...]:
+        if self.is_upgrade:
+            return (self.revision.revision,)
+        else:
+            return self.revision._normalized_down_revisions
+
+    @property
+    def to_revisions_no_deps(  # type:ignore[override]
+        self,
+    ) -> Tuple[str, ...]:
+        if self.is_upgrade:
+            return (self.revision.revision,)
+        else:
+            return self.revision._versioned_down_revisions
+
+    @property
+    def _has_scalar_down_revision(self) -> bool:
+        return len(self.revision._normalized_down_revisions) == 1
+
+    def should_delete_branch(self, heads: Set[str]) -> bool:
+        """A delete is when we are a. in a downgrade and b.
+        we are going to the "base" or we are going to a version that
+        is implied as a dependency on another version that is remaining.
+
+        """
+        if not self.is_downgrade:
+            return False
+
+        if self.revision.revision not in heads:
+            return False
+
+        downrevs = self.revision._normalized_down_revisions
+
+        if not downrevs:
+            # is a base
+            return True
+        else:
+            # determine what the ultimate "to_revisions" for an
+            # unmerge would be.  If there are none, then we're a delete.
+            to_revisions = self._unmerge_to_revisions(heads)
+            return not to_revisions
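+
+    # Sketch (hypothetical values): downgrading a lone base revision "a"
+    # with heads == {"a"} is a "delete", since there is no down revision
+    # left for the version table row to be UPDATEd to.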
+
+    def merge_branch_idents(
+        self, heads: Set[str]
+    ) -> Tuple[List[str], str, str]:
+        other_heads = set(heads).difference(self.from_revisions)
+
+        if other_heads:
+            ancestors = {
+                r.revision
+                for r in self.revision_map._get_ancestor_nodes(
+                    self.revision_map.get_revisions(other_heads), check=False
+                )
+            }
+            from_revisions = list(
+                set(self.from_revisions).difference(ancestors)
+            )
+        else:
+            from_revisions = list(self.from_revisions)
+
+        return (
+            # delete revs, update from rev, update to rev
+            list(from_revisions[0:-1]),
+            from_revisions[-1],
+            self.to_revisions[0],
+        )
+
+    def _unmerge_to_revisions(self, heads: Set[str]) -> Tuple[str, ...]:
+        other_heads = set(heads).difference([self.revision.revision])
+        if other_heads:
+            ancestors = {
+                r.revision
+                for r in self.revision_map._get_ancestor_nodes(
+                    self.revision_map.get_revisions(other_heads), check=False
+                )
+            }
+            return tuple(set(self.to_revisions).difference(ancestors))
+        else:
+            # for each revision we plan to return, compute its ancestors
+            # (excluding self), and remove those from the final output since
+            # they are already accounted for.
+            ancestors = {
+                r.revision
+                for to_revision in self.to_revisions
+                for r in self.revision_map._get_ancestor_nodes(
+                    self.revision_map.get_revisions(to_revision), check=False
+                )
+                if r.revision != to_revision
+            }
+            return tuple(set(self.to_revisions).difference(ancestors))
+
+    def unmerge_branch_idents(
+        self, heads: Set[str]
+    ) -> Tuple[str, str, Tuple[str, ...]]:
+        to_revisions = self._unmerge_to_revisions(heads)
+
+        return (
+            # update from rev, update to rev, insert revs
+            self.from_revisions[0],
+            to_revisions[-1],
+            to_revisions[0:-1],
+        )
+
+    def should_create_branch(self, heads: Set[str]) -> bool:
+        if not self.is_upgrade:
+            return False
+
+        downrevs = self.revision._normalized_down_revisions
+
+        if not downrevs:
+            # is a base
+            return True
+        else:
+            # none of our downrevs are present, so...
+            # we have to insert our version.  This is true whether there
+            # is a single downrev or multiple (in the latter case, we're
+            # a merge point).
+            if not heads.intersection(downrevs):
+                return True
+            else:
+                return False
+
+    def should_merge_branches(self, heads: Set[str]) -> bool:
+        if not self.is_upgrade:
+            return False
+
+        downrevs = self.revision._normalized_down_revisions
+
+        if len(downrevs) > 1 and len(heads.intersection(downrevs)) > 1:
+            return True
+
+        return False
+
+    def should_unmerge_branches(self, heads: Set[str]) -> bool:
+        if not self.is_downgrade:
+            return False
+
+        downrevs = self.revision._normalized_down_revisions
+
+        if self.revision.revision in heads and len(downrevs) > 1:
+            return True
+
+        return False
+
+    def update_version_num(self, heads: Set[str]) -> Tuple[str, str]:
+        if not self._has_scalar_down_revision:
+            downrev = heads.intersection(
+                self.revision._normalized_down_revisions
+            )
+            assert (
+                len(downrev) == 1
+            ), "Can't do an UPDATE because downrevision is ambiguous"
+            down_revision = list(downrev)[0]
+        else:
+            down_revision = self.revision._normalized_down_revisions[0]
+
+        if self.is_upgrade:
+            return down_revision, self.revision.revision
+        else:
+            return self.revision.revision, down_revision
+
+    @property
+    def delete_version_num(self) -> str:
+        return self.revision.revision
+
+    @property
+    def insert_version_num(self) -> str:
+        return self.revision.revision
+
+    @property
+    def info(self) -> MigrationInfo:
+        return MigrationInfo(
+            revision_map=self.revision_map,
+            up_revisions=self.revision.revision,
+            down_revisions=self.revision._normalized_down_revisions,
+            is_upgrade=self.is_upgrade,
+            is_stamp=False,
+        )
+
+
+class StampStep(MigrationStep):
+    def __init__(
+        self,
+        from_: Optional[Union[str, Collection[str]]],
+        to_: Optional[Union[str, Collection[str]]],
+        is_upgrade: bool,
+        branch_move: bool,
+        revision_map: Optional[RevisionMap] = None,
+    ) -> None:
+        self.from_: Tuple[str, ...] = util.to_tuple(from_, default=())
+        self.to_: Tuple[str, ...] = util.to_tuple(to_, default=())
+        self.is_upgrade = is_upgrade
+        self.branch_move = branch_move
+        self.migration_fn = self.stamp_revision
+        self.revision_map = revision_map
+
+    doc: Optional[str] = None
+
+    def stamp_revision(self, **kw: Any) -> None:
+        return None
+
+    def __eq__(self, other):
+        return (
+            isinstance(other, StampStep)
+            and other.from_revisions == self.from_revisions
+            and other.to_revisions == self.to_revisions
+            and other.branch_move == self.branch_move
+            and self.is_upgrade == other.is_upgrade
+        )
+
+    @property
+    def from_revisions(self):
+        return self.from_
+
+    @property
+    def to_revisions(self) -> Tuple[str, ...]:
+        return self.to_
+
+    @property
+    def from_revisions_no_deps(  # type:ignore[override]
+        self,
+    ) -> Tuple[str, ...]:
+        return self.from_
+
+    @property
+    def to_revisions_no_deps(  # type:ignore[override]
+        self,
+    ) -> Tuple[str, ...]:
+        return self.to_
+
+    @property
+    def delete_version_num(self) -> str:
+        assert len(self.from_) == 1
+        return self.from_[0]
+
+    @property
+    def insert_version_num(self) -> str:
+        assert len(self.to_) == 1
+        return self.to_[0]
+
+    def update_version_num(self, heads: Set[str]) -> Tuple[str, str]:
+        assert len(self.from_) == 1
+        assert len(self.to_) == 1
+        return self.from_[0], self.to_[0]
+
+    def merge_branch_idents(
+        self, heads: Union[Set[str], List[str]]
+    ) -> Union[Tuple[List[Any], str, str], Tuple[List[str], str, str]]:
+        return (
+            # delete revs, update from rev, update to rev
+            list(self.from_[0:-1]),
+            self.from_[-1],
+            self.to_[0],
+        )
+
+    def unmerge_branch_idents(
+        self, heads: Set[str]
+    ) -> Tuple[str, str, List[str]]:
+        return (
+            # update from rev, update to rev, insert revs
+            self.from_[0],
+            self.to_[-1],
+            list(self.to_[0:-1]),
+        )
+
+    def should_delete_branch(self, heads: Set[str]) -> bool:
+        # TODO: we probably need to look for self.to_ inside of heads,
+        # in a similar manner as should_create_branch, however we have
+        # no tests for this yet (stamp downgrades w/ branches)
+        return self.is_downgrade and self.branch_move
+
+    def should_create_branch(self, heads: Set[str]) -> Union[Set[str], bool]:
+        return (
+            self.is_upgrade
+            and (self.branch_move or set(self.from_).difference(heads))
+            and set(self.to_).difference(heads)
+        )
+
+    def should_merge_branches(self, heads: Set[str]) -> bool:
+        return len(self.from_) > 1
+
+    def should_unmerge_branches(self, heads: Set[str]) -> bool:
+        return len(self.to_) > 1
+
+    @property
+    def info(self) -> MigrationInfo:
+        up, down = (
+            (self.to_, self.from_)
+            if self.is_upgrade
+            else (self.from_, self.to_)
+        )
+        assert self.revision_map is not None
+        return MigrationInfo(
+            revision_map=self.revision_map,
+            up_revisions=up,
+            down_revisions=down,
+            is_upgrade=self.is_upgrade,
+            is_stamp=True,
+        )
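+
+
+# Illustrative sketch (not part of Alembic): ``alembic stamp`` produces
+# StampStep objects whose migration_fn is the no-op stamp_revision(),
+# so only the version table is touched.  With hypothetical identifiers:
+#
+#   step = StampStep(
+#       from_="a", to_="b", is_upgrade=True,
+#       branch_move=False, revision_map=rev_map,
+#   )
+#   step.update_version_num({"a"})  # -> ("a", "b"): UPDATE row a -> b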
diff --git a/venv/Lib/site-packages/alembic/script/__init__.py b/venv/Lib/site-packages/alembic/script/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d78f3f1dc54c13a52b64e8d668c2baf708eb20bc
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/script/__init__.py
@@ -0,0 +1,4 @@
+from .base import Script
+from .base import ScriptDirectory
+
+__all__ = ["ScriptDirectory", "Script"]
diff --git a/venv/Lib/site-packages/alembic/script/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/script/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ec12e2f6509a5c2e8eed0f4ad230519c13225c3
Binary files /dev/null and b/venv/Lib/site-packages/alembic/script/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/script/__pycache__/base.cpython-311.pyc b/venv/Lib/site-packages/alembic/script/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aef3d149bafb358f9772a6cc5c7bc033e4d66213
Binary files /dev/null and b/venv/Lib/site-packages/alembic/script/__pycache__/base.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/script/__pycache__/revision.cpython-311.pyc b/venv/Lib/site-packages/alembic/script/__pycache__/revision.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c72dc10d564c234b151d5d45a1c942bdafb8e2c9
Binary files /dev/null and b/venv/Lib/site-packages/alembic/script/__pycache__/revision.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/script/__pycache__/write_hooks.cpython-311.pyc b/venv/Lib/site-packages/alembic/script/__pycache__/write_hooks.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3433e46992244bb166579d790bd7e8dc85dee8b
Binary files /dev/null and b/venv/Lib/site-packages/alembic/script/__pycache__/write_hooks.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/script/base.py b/venv/Lib/site-packages/alembic/script/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..5945ca591c221279b05b07833591faa4ad4cd628
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/script/base.py
@@ -0,0 +1,1060 @@
+from __future__ import annotations
+
+from contextlib import contextmanager
+import datetime
+import os
+import re
+import shutil
+import sys
+from types import ModuleType
+from typing import Any
+from typing import cast
+from typing import Iterator
+from typing import List
+from typing import Mapping
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import Union
+
+from . import revision
+from . import write_hooks
+from .. import util
+from ..runtime import migration
+from ..util import compat
+from ..util import not_none
+
+if TYPE_CHECKING:
+    from .revision import _GetRevArg
+    from .revision import _RevIdType
+    from .revision import Revision
+    from ..config import Config
+    from ..config import MessagingOptions
+    from ..runtime.migration import RevisionStep
+    from ..runtime.migration import StampStep
+
+try:
+    if compat.py39:
+        from zoneinfo import ZoneInfo
+        from zoneinfo import ZoneInfoNotFoundError
+    else:
+        from backports.zoneinfo import ZoneInfo  # type: ignore[import-not-found,no-redef] # noqa: E501
+        from backports.zoneinfo import ZoneInfoNotFoundError  # type: ignore[no-redef] # noqa: E501
+except ImportError:
+    ZoneInfo = None  # type: ignore[assignment, misc]
+
+_sourceless_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)(c|o)?$")
+_only_source_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)$")
+_legacy_rev = re.compile(r"([a-f0-9]+)\.py$")
+_slug_re = re.compile(r"\w+")
+_default_file_template = "%(rev)s_%(slug)s"
+_split_on_space_comma = re.compile(r", *|(?: +)")
+
+_split_on_space_comma_colon = re.compile(r", *|(?: +)|\:")
+
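+# Examples of what the patterns above accept (illustrative filenames):
+# _only_source_rev_file matches "ae1027a6acf_add_some_column.py";
+# _sourceless_rev_file additionally matches the compiled forms
+# "ae1027a6acf_add_some_column.pyc" / ".pyo"; both skip "__init__.py"
+# and editor lock files beginning with ".#".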
+
+class ScriptDirectory:
+
+    """Provides operations upon an Alembic script directory.
+
+    This object is useful for getting information about current revisions,
+    most notably being able to get at the "head" revision, for schemes
+    that want to test if the current revision in the database is the most
+    recent::
+
+        from alembic.script import ScriptDirectory
+        from alembic.config import Config
+        config = Config()
+        config.set_main_option("script_location", "myapp:migrations")
+        script = ScriptDirectory.from_config(config)
+
+        head_revision = script.get_current_head()
+
+    """
+
+    def __init__(
+        self,
+        dir: str,  # noqa
+        file_template: str = _default_file_template,
+        truncate_slug_length: Optional[int] = 40,
+        version_locations: Optional[List[str]] = None,
+        sourceless: bool = False,
+        output_encoding: str = "utf-8",
+        timezone: Optional[str] = None,
+        hook_config: Optional[Mapping[str, str]] = None,
+        recursive_version_locations: bool = False,
+        messaging_opts: MessagingOptions = cast(
+            "MessagingOptions", util.EMPTY_DICT
+        ),
+    ) -> None:
+        self.dir = dir
+        self.file_template = file_template
+        self.version_locations = version_locations
+        self.truncate_slug_length = truncate_slug_length or 40
+        self.sourceless = sourceless
+        self.output_encoding = output_encoding
+        self.revision_map = revision.RevisionMap(self._load_revisions)
+        self.timezone = timezone
+        self.hook_config = hook_config
+        self.recursive_version_locations = recursive_version_locations
+        self.messaging_opts = messaging_opts
+
+        if not os.access(dir, os.F_OK):
+            raise util.CommandError(
+                "Path doesn't exist: %r.  Please use "
+                "the 'init' command to create a new "
+                "scripts folder." % os.path.abspath(dir)
+            )
+
+    @property
+    def versions(self) -> str:
+        loc = self._version_locations
+        if len(loc) > 1:
+            raise util.CommandError("Multiple version_locations present")
+        else:
+            return loc[0]
+
+    @util.memoized_property
+    def _version_locations(self) -> Sequence[str]:
+        if self.version_locations:
+            return [
+                os.path.abspath(util.coerce_resource_to_filename(location))
+                for location in self.version_locations
+            ]
+        else:
+            return (os.path.abspath(os.path.join(self.dir, "versions")),)
+
+    def _load_revisions(self) -> Iterator[Script]:
+        if self.version_locations:
+            paths = [
+                vers
+                for vers in self._version_locations
+                if os.path.exists(vers)
+            ]
+        else:
+            paths = [self.versions]
+
+        dupes = set()
+        for vers in paths:
+            for file_path in Script._list_py_dir(self, vers):
+                real_path = os.path.realpath(file_path)
+                if real_path in dupes:
+                    util.warn(
+                        "File %s loaded twice! ignoring. Please ensure "
+                        "version_locations is unique." % real_path
+                    )
+                    continue
+                dupes.add(real_path)
+
+                filename = os.path.basename(real_path)
+                dir_name = os.path.dirname(real_path)
+                script = Script._from_filename(self, dir_name, filename)
+                if script is None:
+                    continue
+                yield script
+
+    @classmethod
+    def from_config(cls, config: Config) -> ScriptDirectory:
+        """Produce a new :class:`.ScriptDirectory` given a :class:`.Config`
+        instance.
+
+        The :class:`.Config` need only have the ``script_location`` key
+        present.
+
+        """
+        script_location = config.get_main_option("script_location")
+        if script_location is None:
+            raise util.CommandError(
+                "No 'script_location' key " "found in configuration."
+            )
+        truncate_slug_length: Optional[int]
+        tsl = config.get_main_option("truncate_slug_length")
+        if tsl is not None:
+            truncate_slug_length = int(tsl)
+        else:
+            truncate_slug_length = None
+
+        version_locations_str = config.get_main_option("version_locations")
+        version_locations: Optional[List[str]]
+        if version_locations_str:
+            version_path_separator = config.get_main_option(
+                "version_path_separator"
+            )
+
+            split_on_path = {
+                None: None,
+                "space": " ",
+                "os": os.pathsep,
+                ":": ":",
+                ";": ";",
+            }
+
+            try:
+                split_char: Optional[str] = split_on_path[
+                    version_path_separator
+                ]
+            except KeyError as ke:
+                raise ValueError(
+                    "'%s' is not a valid value for "
+                    "version_path_separator; "
+                    "expected 'space', 'os', ':', ';'" % version_path_separator
+                ) from ke
+            else:
+                if split_char is None:
+                    # legacy behaviour for backwards compatibility
+                    version_locations = _split_on_space_comma.split(
+                        version_locations_str
+                    )
+                else:
+                    version_locations = [
+                        x for x in version_locations_str.split(split_char) if x
+                    ]
+        else:
+            version_locations = None
+
+        prepend_sys_path = config.get_main_option("prepend_sys_path")
+        if prepend_sys_path:
+            sys.path[:0] = list(
+                _split_on_space_comma_colon.split(prepend_sys_path)
+            )
+
+        rvl = config.get_main_option("recursive_version_locations") == "true"
+        return ScriptDirectory(
+            util.coerce_resource_to_filename(script_location),
+            file_template=config.get_main_option(
+                "file_template", _default_file_template
+            ),
+            truncate_slug_length=truncate_slug_length,
+            sourceless=config.get_main_option("sourceless") == "true",
+            output_encoding=config.get_main_option("output_encoding", "utf-8"),
+            version_locations=version_locations,
+            timezone=config.get_main_option("timezone"),
+            hook_config=config.get_section("post_write_hooks", {}),
+            recursive_version_locations=rvl,
+            messaging_opts=config.messaging_opts,
+        )
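+
+    # Illustrative sketch (not part of Alembic): the alembic.ini keys
+    # consumed by from_config() above, with hypothetical values:
+    #
+    #   [alembic]
+    #   script_location = myapp:migrations
+    #   version_locations = %(here)s/model %(here)s/seed
+    #   version_path_separator = space
+    #   truncate_slug_length = 40
+    #   sourceless = false
+    #   recursive_version_locations = false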
+
+    @contextmanager
+    def _catch_revision_errors(
+        self,
+        ancestor: Optional[str] = None,
+        multiple_heads: Optional[str] = None,
+        start: Optional[str] = None,
+        end: Optional[str] = None,
+        resolution: Optional[str] = None,
+    ) -> Iterator[None]:
+        try:
+            yield
+        except revision.RangeNotAncestorError as rna:
+            if start is None:
+                start = cast(Any, rna.lower)
+            if end is None:
+                end = cast(Any, rna.upper)
+            if not ancestor:
+                ancestor = (
+                    "Requested range %(start)s:%(end)s does not refer to "
+                    "ancestor/descendant revisions along the same branch"
+                )
+            ancestor = ancestor % {"start": start, "end": end}
+            raise util.CommandError(ancestor) from rna
+        except revision.MultipleHeads as mh:
+            if not multiple_heads:
+                multiple_heads = (
+                    "Multiple head revisions are present for given "
+                    "argument '%(head_arg)s'; please "
+                    "specify a specific target revision, "
+                    "'<branchname>@%(head_arg)s' to "
+                    "narrow to a specific head, or 'heads' for all heads"
+                )
+            multiple_heads = multiple_heads % {
+                "head_arg": end or mh.argument,
+                "heads": util.format_as_comma(mh.heads),
+            }
+            raise util.CommandError(multiple_heads) from mh
+        except revision.ResolutionError as re:
+            if resolution is None:
+                resolution = "Can't locate revision identified by '%s'" % (
+                    re.argument
+                )
+            raise util.CommandError(resolution) from re
+        except revision.RevisionError as err:
+            raise util.CommandError(err.args[0]) from err
+
+    def walk_revisions(
+        self, base: str = "base", head: str = "heads"
+    ) -> Iterator[Script]:
+        """Iterate through all revisions.
+
+        :param base: the base revision, or "base" to start from the
+         empty revision.
+
+        :param head: the head revision; defaults to "heads" to indicate
+         all head revisions.  May also be "head" to indicate a single
+         head revision.
+
+        """
+        with self._catch_revision_errors(start=base, end=head):
+            for rev in self.revision_map.iterate_revisions(
+                head, base, inclusive=True, assert_relative_length=False
+            ):
+                yield cast(Script, rev)
+
+    def get_revisions(self, id_: _GetRevArg) -> Tuple[Script, ...]:
+        """Return the :class:`.Script` instance with the given rev identifier,
+        symbolic name, or sequence of identifiers.
+
+        """
+        with self._catch_revision_errors():
+            return cast(
+                Tuple[Script, ...],
+                self.revision_map.get_revisions(id_),
+            )
+
+    def get_all_current(self, id_: Tuple[str, ...]) -> Set[Script]:
+        with self._catch_revision_errors():
+            return cast(Set[Script], self.revision_map._get_all_current(id_))
+
+    def get_revision(self, id_: str) -> Script:
+        """Return the :class:`.Script` instance with the given rev id.
+
+        .. seealso::
+
+            :meth:`.ScriptDirectory.get_revisions`
+
+        """
+
+        with self._catch_revision_errors():
+            return cast(Script, self.revision_map.get_revision(id_))
+
+    def as_revision_number(
+        self, id_: Optional[str]
+    ) -> Optional[Union[str, Tuple[str, ...]]]:
+        """Convert a symbolic revision, i.e. 'head' or 'base', into
+        an actual revision number."""
+
+        with self._catch_revision_errors():
+            rev, branch_name = self.revision_map._resolve_revision_number(id_)
+
+        if not rev:
+            # convert () to None
+            return None
+        elif id_ == "heads":
+            return rev
+        else:
+            return rev[0]
+
+    def iterate_revisions(
+        self,
+        upper: Union[str, Tuple[str, ...], None],
+        lower: Union[str, Tuple[str, ...], None],
+        **kw: Any,
+    ) -> Iterator[Script]:
+        """Iterate through script revisions, starting at the given
+        upper revision identifier and ending at the lower.
+
+        The traversal relies strictly on the ``down_revision``
+        marker inside each migration script, so it is a
+        requirement that upper >= lower; otherwise nothing is
+        returned.
+
+        The iterator yields :class:`.Script` objects.
+
+        .. seealso::
+
+            :meth:`.RevisionMap.iterate_revisions`
+
+        """
+        return cast(
+            Iterator[Script],
+            self.revision_map.iterate_revisions(upper, lower, **kw),
+        )
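+
+    # Usage sketch (hypothetical setup): walk every revision from the
+    # current heads down to the very first migration, newest first.
+    #
+    #   script = ScriptDirectory.from_config(config)
+    #   for sc in script.iterate_revisions("heads", "base"):
+    #       print(sc.revision, sc.doc)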
+
+    def get_current_head(self) -> Optional[str]:
+        """Return the current head revision.
+
+        If the script directory has multiple heads
+        due to branching, an error is raised;
+        :meth:`.ScriptDirectory.get_heads` should be
+        preferred.
+
+        :return: a string revision number.
+
+        .. seealso::
+
+            :meth:`.ScriptDirectory.get_heads`
+
+        """
+        with self._catch_revision_errors(
+            multiple_heads=(
+                "The script directory has multiple heads (due to branching)."
+                "Please use get_heads(), or merge the branches using "
+                "alembic merge."
+            )
+        ):
+            return self.revision_map.get_current_head()
+
+    def get_heads(self) -> List[str]:
+        """Return all "versioned head" revisions as strings.
+
+        This is normally a list of length one,
+        unless branches are present.  The
+        :meth:`.ScriptDirectory.get_current_head()` method
+        can be used normally when a script directory
+        has only one head.
+
+        :return: a list of string revision numbers.
+        """
+        return list(self.revision_map.heads)
+
+    def get_base(self) -> Optional[str]:
+        """Return the "base" revision as a string.
+
+        This is the revision number of the script that
+        has a ``down_revision`` of None.
+
+        If the script directory has multiple bases, an error is raised;
+        :meth:`.ScriptDirectory.get_bases` should be
+        preferred.
+
+        """
+        bases = self.get_bases()
+        if len(bases) > 1:
+            raise util.CommandError(
+                "The script directory has multiple bases. "
+                "Please use get_bases()."
+            )
+        elif bases:
+            return bases[0]
+        else:
+            return None
+
+    def get_bases(self) -> List[str]:
+        """return all "base" revisions as strings.
+
+        This is the revision number of all scripts that
+        have a ``down_revision`` of None.
+
+        """
+        return list(self.revision_map.bases)
+
+    def _upgrade_revs(
+        self, destination: str, current_rev: str
+    ) -> List[RevisionStep]:
+        with self._catch_revision_errors(
+            ancestor="Destination %(end)s is not a valid upgrade "
+            "target from current head(s)",
+            end=destination,
+        ):
+            revs = self.iterate_revisions(
+                destination, current_rev, implicit_base=True
+            )
+            return [
+                migration.MigrationStep.upgrade_from_script(
+                    self.revision_map, script
+                )
+                for script in reversed(list(revs))
+            ]
+
+    def _downgrade_revs(
+        self, destination: str, current_rev: Optional[str]
+    ) -> List[RevisionStep]:
+        with self._catch_revision_errors(
+            ancestor="Destination %(end)s is not a valid downgrade "
+            "target from current head(s)",
+            end=destination,
+        ):
+            revs = self.iterate_revisions(
+                current_rev, destination, select_for_downgrade=True
+            )
+            return [
+                migration.MigrationStep.downgrade_from_script(
+                    self.revision_map, script
+                )
+                for script in revs
+            ]
+
+    def _stamp_revs(
+        self, revision: _RevIdType, heads: _RevIdType
+    ) -> List[StampStep]:
+        with self._catch_revision_errors(
+            multiple_heads="Multiple heads are present; please specify a "
+            "single target revision"
+        ):
+            heads_revs = self.get_revisions(heads)
+
+            steps = []
+
+            if not revision:
+                revision = "base"
+
+            filtered_heads: List[Script] = []
+            for rev in util.to_tuple(revision):
+                if rev:
+                    filtered_heads.extend(
+                        self.revision_map.filter_for_lineage(
+                            cast(Sequence[Script], heads_revs),
+                            rev,
+                            include_dependencies=True,
+                        )
+                    )
+            filtered_heads = util.unique_list(filtered_heads)
+
+            dests = self.get_revisions(revision) or [None]
+
+            for dest in dests:
+                if dest is None:
+                    # dest is 'base'.  Return a "delete branch" migration
+                    # for all applicable heads.
+                    steps.extend(
+                        [
+                            migration.StampStep(
+                                head.revision,
+                                None,
+                                False,
+                                True,
+                                self.revision_map,
+                            )
+                            for head in filtered_heads
+                        ]
+                    )
+                    continue
+                elif dest in filtered_heads:
+                    # the dest is already in the version table, do nothing.
+                    continue
+
+                # figure out if the dest is a descendant or an
+                # ancestor of the selected nodes
+                descendants = set(
+                    self.revision_map._get_descendant_nodes([dest])
+                )
+                ancestors = set(self.revision_map._get_ancestor_nodes([dest]))
+
+                if descendants.intersection(filtered_heads):
+                    # heads are above the target, so this is a downgrade.
+                    # we can treat them as a "merge", single step.
+                    assert not ancestors.intersection(filtered_heads)
+                    todo_heads = [head.revision for head in filtered_heads]
+                    step = migration.StampStep(
+                        todo_heads,
+                        dest.revision,
+                        False,
+                        False,
+                        self.revision_map,
+                    )
+                    steps.append(step)
+                    continue
+                elif ancestors.intersection(filtered_heads):
+                    # heads are below the target, so this is an upgrade.
+                    # we can treat them as a "merge", single step.
+                    todo_heads = [head.revision for head in filtered_heads]
+                    step = migration.StampStep(
+                        todo_heads,
+                        dest.revision,
+                        True,
+                        False,
+                        self.revision_map,
+                    )
+                    steps.append(step)
+                    continue
+                else:
+                    # destination is in a branch not represented,
+                    # treat it as new branch
+                    step = migration.StampStep(
+                        (), dest.revision, True, True, self.revision_map
+                    )
+                    steps.append(step)
+                    continue
+
+            return steps
+
+    def run_env(self) -> None:
+        """Run the script environment.
+
+        This basically runs the ``env.py`` script present
+        in the migration environment.  It is called exclusively
+        by the command functions in :mod:`alembic.command`.
+
+        """
+        util.load_python_file(self.dir, "env.py")
+
+    @property
+    def env_py_location(self) -> str:
+        return os.path.abspath(os.path.join(self.dir, "env.py"))
+
+    def _generate_template(self, src: str, dest: str, **kw: Any) -> None:
+        with util.status(
+            f"Generating {os.path.abspath(dest)}", **self.messaging_opts
+        ):
+            util.template_to_file(src, dest, self.output_encoding, **kw)
+
+    def _copy_file(self, src: str, dest: str) -> None:
+        with util.status(
+            f"Generating {os.path.abspath(dest)}", **self.messaging_opts
+        ):
+            shutil.copy(src, dest)
+
+    def _ensure_directory(self, path: str) -> None:
+        path = os.path.abspath(path)
+        if not os.path.exists(path):
+            with util.status(
+                f"Creating directory {path}", **self.messaging_opts
+            ):
+                os.makedirs(path)
+
+    def _generate_create_date(self) -> datetime.datetime:
+        if self.timezone is not None:
+            if ZoneInfo is None:
+                raise util.CommandError(
+                    "Python >= 3.9 is required for timezone support or"
+                    "the 'backports.zoneinfo' package must be installed."
+                )
+            # First, assume correct capitalization
+            try:
+                tzinfo = ZoneInfo(self.timezone)
+            except ZoneInfoNotFoundError:
+                tzinfo = None
+            if tzinfo is None:
+                try:
+                    tzinfo = ZoneInfo(self.timezone.upper())
+                except ZoneInfoNotFoundError:
+                    raise util.CommandError(
+                        "Can't locate timezone: %s" % self.timezone
+                    ) from None
+            create_date = (
+                datetime.datetime.utcnow()
+                .replace(tzinfo=datetime.timezone.utc)
+                .astimezone(tzinfo)
+            )
+        else:
+            create_date = datetime.datetime.now()
+        return create_date
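+
+    # Sketch: with ``timezone = UTC`` configured, the create date above
+    # becomes an aware datetime via ZoneInfo("UTC"); with no timezone
+    # configured it is a naive local datetime.now().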
+
+    def generate_revision(
+        self,
+        revid: str,
+        message: Optional[str],
+        head: Optional[_RevIdType] = None,
+        splice: Optional[bool] = False,
+        branch_labels: Optional[_RevIdType] = None,
+        version_path: Optional[str] = None,
+        depends_on: Optional[_RevIdType] = None,
+        **kw: Any,
+    ) -> Optional[Script]:
+        """Generate a new revision file.
+
+        This runs the ``script.py.mako`` template, given
+        template arguments, and creates a new file.
+
+        :param revid: String revision id.  Typically this
+         comes from ``alembic.util.rev_id()``.
+        :param message: the revision message, the one passed
+         by the -m argument to the ``revision`` command.
+        :param head: the head revision to generate against.  Defaults
+         to the current "head" if no branches are present, else raises
+         an exception.
+        :param splice: if True, allow the "head" version to not be an
+         actual head; otherwise, the selected head must be a head
+         (e.g. endpoint) revision.
+
+        """
+        if head is None:
+            head = "head"
+
+        try:
+            Script.verify_rev_id(revid)
+        except revision.RevisionError as err:
+            raise util.CommandError(err.args[0]) from err
+
+        with self._catch_revision_errors(
+            multiple_heads=(
+                "Multiple heads are present; please specify the head "
+                "revision on which the new revision should be based, "
+                "or perform a merge."
+            )
+        ):
+            heads = cast(
+                Tuple[Optional["Revision"], ...],
+                self.revision_map.get_revisions(head),
+            )
+            for h in heads:
+                assert h != "base"  # type: ignore[comparison-overlap]
+
+        if len(set(heads)) != len(heads):
+            raise util.CommandError("Duplicate head revisions specified")
+
+        create_date = self._generate_create_date()
+
+        if version_path is None:
+            if len(self._version_locations) > 1:
+                for head_ in heads:
+                    if head_ is not None:
+                        assert isinstance(head_, Script)
+                        version_path = os.path.dirname(head_.path)
+                        break
+                else:
+                    raise util.CommandError(
+                        "Multiple version locations present, "
+                        "please specify --version-path"
+                    )
+            else:
+                version_path = self.versions
+
+        norm_path = os.path.normpath(os.path.abspath(version_path))
+        for vers_path in self._version_locations:
+            if os.path.normpath(vers_path) == norm_path:
+                break
+        else:
+            raise util.CommandError(
+                "Path %s is not represented in current "
+                "version locations" % version_path
+            )
+
+        if self.version_locations:
+            self._ensure_directory(version_path)
+
+        path = self._rev_path(version_path, revid, message, create_date)
+
+        if not splice:
+            for head_ in heads:
+                if head_ is not None and not head_.is_head:
+                    raise util.CommandError(
+                        "Revision %s is not a head revision; please specify "
+                        "--splice to create a new branch from this revision"
+                        % head_.revision
+                    )
+
+        resolved_depends_on: Optional[List[str]]
+        if depends_on:
+            with self._catch_revision_errors():
+                resolved_depends_on = [
+                    dep
+                    if dep in rev.branch_labels  # maintain branch labels
+                    else rev.revision  # resolve partial revision identifiers
+                    for rev, dep in [
+                        (not_none(self.revision_map.get_revision(dep)), dep)
+                        for dep in util.to_list(depends_on)
+                    ]
+                ]
+        else:
+            resolved_depends_on = None
+
+        self._generate_template(
+            os.path.join(self.dir, "script.py.mako"),
+            path,
+            up_revision=str(revid),
+            down_revision=revision.tuple_rev_as_scalar(
+                tuple(h.revision if h is not None else None for h in heads)
+            ),
+            branch_labels=util.to_tuple(branch_labels),
+            depends_on=revision.tuple_rev_as_scalar(resolved_depends_on),
+            create_date=create_date,
+            comma=util.format_as_comma,
+            message=message if message is not None else ("empty message"),
+            **kw,
+        )
+
+        post_write_hooks = self.hook_config
+        if post_write_hooks:
+            write_hooks._run_hooks(path, post_write_hooks)
+
+        try:
+            script = Script._from_path(self, path)
+        except revision.RevisionError as err:
+            raise util.CommandError(err.args[0]) from err
+        if script is None:
+            return None
+        if branch_labels and not script.branch_labels:
+            raise util.CommandError(
+                "Version %s specified branch_labels %s, however the "
+                "migration file %s does not have them; have you upgraded "
+                "your script.py.mako to include the "
+                "'branch_labels' section?"
+                % (script.revision, branch_labels, script.path)
+            )
+        self.revision_map.add_revision(script)
+        return script
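+
+    # Usage sketch (hypothetical names): create a new revision file
+    # against the current head, much as the ``alembic revision``
+    # command does.
+    #
+    #   from alembic.util import rev_id
+    #   script_dir.generate_revision(rev_id(), "add account table")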
+
+    def _rev_path(
+        self,
+        path: str,
+        rev_id: str,
+        message: Optional[str],
+        create_date: datetime.datetime,
+    ) -> str:
+        epoch = int(create_date.timestamp())
+        slug = "_".join(_slug_re.findall(message or "")).lower()
+        if len(slug) > self.truncate_slug_length:
+            slug = slug[: self.truncate_slug_length].rsplit("_", 1)[0] + "_"
+        filename = "%s.py" % (
+            self.file_template
+            % {
+                "rev": rev_id,
+                "slug": slug,
+                "epoch": epoch,
+                "year": create_date.year,
+                "month": create_date.month,
+                "day": create_date.day,
+                "hour": create_date.hour,
+                "minute": create_date.minute,
+                "second": create_date.second,
+            }
+        )
+        return os.path.join(path, filename)
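+
+    # Illustrative sketch: with the default file_template
+    # "%(rev)s_%(slug)s", _rev_path() turns revid "ae1027a6acf" and
+    # message "add account table" into "ae1027a6acf_add_account_table.py";
+    # a template like "%(year)d_%(month).2d_%(rev)s_%(slug)s" would
+    # prefix the creation date instead.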
+
+
+class Script(revision.Revision):
+
+    """Represent a single revision file in a ``versions/`` directory.
+
+    The :class:`.Script` instance is returned by methods
+    such as :meth:`.ScriptDirectory.iterate_revisions`.
+
+    """
+
+    def __init__(self, module: ModuleType, rev_id: str, path: str):
+        self.module = module
+        self.path = path
+        super().__init__(
+            rev_id,
+            module.down_revision,
+            branch_labels=util.to_tuple(
+                getattr(module, "branch_labels", None), default=()
+            ),
+            dependencies=util.to_tuple(
+                getattr(module, "depends_on", None), default=()
+            ),
+        )
+
+    module: ModuleType
+    """The Python module representing the actual script itself."""
+
+    path: str
+    """Filesystem path of the script."""
+
+    _db_current_indicator: Optional[bool] = None
+    """Utility variable which when set will cause string output to indicate
+    this is a "current" version in some database"""
+
+    @property
+    def doc(self) -> str:
+        """Return the docstring given in the script."""
+
+        return re.split("\n\n", self.longdoc)[0]
+
+    @property
+    def longdoc(self) -> str:
+        """Return the docstring given in the script."""
+
+        doc = self.module.__doc__
+        if doc:
+            if hasattr(self.module, "_alembic_source_encoding"):
+                doc = doc.decode(  # type: ignore[attr-defined]
+                    self.module._alembic_source_encoding
+                )
+            return doc.strip()  # type: ignore[union-attr]
+        else:
+            return ""
+
+    @property
+    def log_entry(self) -> str:
+        entry = "Rev: %s%s%s%s%s\n" % (
+            self.revision,
+            " (head)" if self.is_head else "",
+            " (branchpoint)" if self.is_branch_point else "",
+            " (mergepoint)" if self.is_merge_point else "",
+            " (current)" if self._db_current_indicator else "",
+        )
+        if self.is_merge_point:
+            entry += "Merges: %s\n" % (self._format_down_revision(),)
+        else:
+            entry += "Parent: %s\n" % (self._format_down_revision(),)
+
+        if self.dependencies:
+            entry += "Also depends on: %s\n" % (
+                util.format_as_comma(self.dependencies)
+            )
+
+        if self.is_branch_point:
+            entry += "Branches into: %s\n" % (
+                util.format_as_comma(self.nextrev)
+            )
+
+        if self.branch_labels:
+            entry += "Branch names: %s\n" % (
+                util.format_as_comma(self.branch_labels),
+            )
+
+        entry += "Path: %s\n" % (self.path,)
+
+        entry += "\n%s\n" % (
+            "\n".join("    %s" % para for para in self.longdoc.splitlines())
+        )
+        return entry
+
+    def __str__(self) -> str:
+        return "%s -> %s%s%s%s, %s" % (
+            self._format_down_revision(),
+            self.revision,
+            " (head)" if self.is_head else "",
+            " (branchpoint)" if self.is_branch_point else "",
+            " (mergepoint)" if self.is_merge_point else "",
+            self.doc,
+        )
+
+    def _head_only(
+        self,
+        include_branches: bool = False,
+        include_doc: bool = False,
+        include_parents: bool = False,
+        tree_indicators: bool = True,
+        head_indicators: bool = True,
+    ) -> str:
+        text = self.revision
+        if include_parents:
+            if self.dependencies:
+                text = "%s (%s) -> %s" % (
+                    self._format_down_revision(),
+                    util.format_as_comma(self.dependencies),
+                    text,
+                )
+            else:
+                text = "%s -> %s" % (self._format_down_revision(), text)
+        assert text is not None
+        if include_branches and self.branch_labels:
+            text += " (%s)" % util.format_as_comma(self.branch_labels)
+        if head_indicators or tree_indicators:
+            text += "%s%s%s" % (
+                " (head)" if self._is_real_head else "",
+                " (effective head)"
+                if self.is_head and not self._is_real_head
+                else "",
+                " (current)" if self._db_current_indicator else "",
+            )
+        if tree_indicators:
+            text += "%s%s" % (
+                " (branchpoint)" if self.is_branch_point else "",
+                " (mergepoint)" if self.is_merge_point else "",
+            )
+        if include_doc:
+            text += ", %s" % self.doc
+        return text
+
+    def cmd_format(
+        self,
+        verbose: bool,
+        include_branches: bool = False,
+        include_doc: bool = False,
+        include_parents: bool = False,
+        tree_indicators: bool = True,
+    ) -> str:
+        if verbose:
+            return self.log_entry
+        else:
+            return self._head_only(
+                include_branches, include_doc, include_parents, tree_indicators
+            )
+
+    def _format_down_revision(self) -> str:
+        if not self.down_revision:
+            return "<base>"
+        else:
+            return util.format_as_comma(self._versioned_down_revisions)
+
+    @classmethod
+    def _from_path(
+        cls, scriptdir: ScriptDirectory, path: str
+    ) -> Optional[Script]:
+        dir_, filename = os.path.split(path)
+        return cls._from_filename(scriptdir, dir_, filename)
+
+    @classmethod
+    def _list_py_dir(cls, scriptdir: ScriptDirectory, path: str) -> List[str]:
+        paths = []
+        for root, dirs, files in os.walk(path, topdown=True):
+            if root.endswith("__pycache__"):
+                # a special case - we may include these files
+                # if a `sourceless` option is specified
+                continue
+
+            for filename in sorted(files):
+                paths.append(os.path.join(root, filename))
+
+            if scriptdir.sourceless:
+                # look for __pycache__
+                py_cache_path = os.path.join(root, "__pycache__")
+                if os.path.exists(py_cache_path):
+                    # add all files from __pycache__ whose filename is not
+                    # already in the names we got from the version directory.
+                    # add as relative paths including __pycache__ token
+                    names = {filename.split(".")[0] for filename in files}
+                    paths.extend(
+                        os.path.join(py_cache_path, pyc)
+                        for pyc in os.listdir(py_cache_path)
+                        if pyc.split(".")[0] not in names
+                    )
+
+            if not scriptdir.recursive_version_locations:
+                break
+
+            # the real script order is defined by revision, but it may be
+            # undefined if there are many files with the same
+            # `down_revision`; for a better user experience (e.g. when
+            # debugging), we use a deterministic order
+            dirs.sort()
+
+        return paths
+
+    @classmethod
+    def _from_filename(
+        cls, scriptdir: ScriptDirectory, dir_: str, filename: str
+    ) -> Optional[Script]:
+        if scriptdir.sourceless:
+            py_match = _sourceless_rev_file.match(filename)
+        else:
+            py_match = _only_source_rev_file.match(filename)
+
+        if not py_match:
+            return None
+
+        py_filename = py_match.group(1)
+
+        if scriptdir.sourceless:
+            is_c = py_match.group(2) == "c"
+            is_o = py_match.group(2) == "o"
+        else:
+            is_c = is_o = False
+
+        if is_o or is_c:
+            py_exists = os.path.exists(os.path.join(dir_, py_filename))
+            pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c"))
+
+            # prefer .py over .pyc because we'd like to get the
+            # source encoding; prefer .pyc over .pyo because we'd like to
+            # have the docstrings which a -OO file would not have
+            if py_exists or is_o and pyc_exists:
+                return None
+
+        module = util.load_python_file(dir_, filename)
+
+        if not hasattr(module, "revision"):
+            # attempt to get the revision id from the script name;
+            # this is for legacy scripts only
+            m = _legacy_rev.match(filename)
+            if not m:
+                raise util.CommandError(
+                    "Could not determine revision id from filename %s. "
+                    "Be sure the 'revision' variable is "
+                    "declared inside the script (please see 'Upgrading "
+                    "from Alembic 0.1 to 0.2' in the documentation)."
+                    % filename
+                )
+            else:
+                revision = m.group(1)
+        else:
+            revision = module.revision
+        return Script(module, revision, os.path.join(dir_, filename))
diff --git a/venv/Lib/site-packages/alembic/script/revision.py b/venv/Lib/site-packages/alembic/script/revision.py
new file mode 100644
index 0000000000000000000000000000000000000000..77a802cdcadf9c59049fdb5db1c2be95d305a1ae
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/script/revision.py
@@ -0,0 +1,1721 @@
+from __future__ import annotations
+
+import collections
+import re
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Collection
+from typing import Deque
+from typing import Dict
+from typing import FrozenSet
+from typing import Iterable
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import overload
+from typing import Protocol
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy import util as sqlautil
+
+from .. import util
+from ..util import not_none
+
+if TYPE_CHECKING:
+    from typing import Literal
+
+_RevIdType = Union[str, List[str], Tuple[str, ...]]
+_GetRevArg = Union[
+    str,
+    Iterable[Optional[str]],
+    Iterable[str],
+]
+_RevisionIdentifierType = Union[str, Tuple[str, ...], None]
+_RevisionOrStr = Union["Revision", str]
+_RevisionOrBase = Union["Revision", "Literal['base']"]
+_InterimRevisionMapType = Dict[str, "Revision"]
+_RevisionMapType = Dict[Union[None, str, Tuple[()]], Optional["Revision"]]
+_T = TypeVar("_T")
+_TR = TypeVar("_TR", bound=Optional[_RevisionOrStr])
+
+_relative_destination = re.compile(r"(?:(.+?)@)?(\w+)?((?:\+|-)\d+)")
+_revision_illegal_chars = ["@", "-", "+"]
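+
+# Illustrative matches for _relative_destination (all values hypothetical):
+#
+#   "heads+2"       -> (None, "heads", "+2")
+#   "mybranch@+1"   -> ("mybranch", None, "+1")
+#   "ae1027a6acf-3" -> (None, "ae1027a6acf", "-3")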
+
+
+class _CollectRevisionsProtocol(Protocol):
+    def __call__(
+        self,
+        upper: _RevisionIdentifierType,
+        lower: _RevisionIdentifierType,
+        inclusive: bool,
+        implicit_base: bool,
+        assert_relative_length: bool,
+    ) -> Tuple[Set[Revision], Tuple[Optional[_RevisionOrBase], ...]]:
+        ...
+
+
+class RevisionError(Exception):
+    pass
+
+
+class RangeNotAncestorError(RevisionError):
+    def __init__(
+        self, lower: _RevisionIdentifierType, upper: _RevisionIdentifierType
+    ) -> None:
+        self.lower = lower
+        self.upper = upper
+        super().__init__(
+            "Revision %s is not an ancestor of revision %s"
+            % (lower or "base", upper or "base")
+        )
+
+
+class MultipleHeads(RevisionError):
+    def __init__(self, heads: Sequence[str], argument: Optional[str]) -> None:
+        self.heads = heads
+        self.argument = argument
+        super().__init__(
+            "Multiple heads are present for given argument '%s'; "
+            "%s" % (argument, ", ".join(heads))
+        )
+
+
+class ResolutionError(RevisionError):
+    def __init__(self, message: str, argument: str) -> None:
+        super().__init__(message)
+        self.argument = argument
+
+
+class CycleDetected(RevisionError):
+    kind = "Cycle"
+
+    def __init__(self, revisions: Sequence[str]) -> None:
+        self.revisions = revisions
+        super().__init__(
+            "%s is detected in revisions (%s)"
+            % (self.kind, ", ".join(revisions))
+        )
+
+
+class DependencyCycleDetected(CycleDetected):
+    kind = "Dependency cycle"
+
+    def __init__(self, revisions: Sequence[str]) -> None:
+        super().__init__(revisions)
+
+
+class LoopDetected(CycleDetected):
+    kind = "Self-loop"
+
+    def __init__(self, revision: str) -> None:
+        super().__init__([revision])
+
+
+class DependencyLoopDetected(DependencyCycleDetected, LoopDetected):
+    kind = "Dependency self-loop"
+
+    def __init__(self, revision: Sequence[str]) -> None:
+        super().__init__(revision)
+
+
+class RevisionMap:
+    """Maintains a map of :class:`.Revision` objects.
+
+    :class:`.RevisionMap` is used by :class:`.ScriptDirectory` to maintain
+    and traverse the collection of :class:`.Script` objects, which are
+    themselves instances of :class:`.Revision`.
+
+    """
+
+    def __init__(self, generator: Callable[[], Iterable[Revision]]) -> None:
+        """Construct a new :class:`.RevisionMap`.
+
+        :param generator: a zero-arg callable that will generate an iterable
+         of :class:`.Revision` instances to be used.   These are typically
+         :class:`.Script` subclasses within regular Alembic use.
+
+        """
+        self._generator = generator
+
+    @util.memoized_property
+    def heads(self) -> Tuple[str, ...]:
+        """All "head" revisions as strings.
+
+        This is normally a tuple of length one,
+        unless unmerged branches are present.
+
+        :return: a tuple of string revision numbers.
+
+        """
+        self._revision_map
+        return self.heads
+
+    @util.memoized_property
+    def bases(self) -> Tuple[str, ...]:
+        """All "base" revisions as strings.
+
+        These are revisions that have a ``down_revision`` of None,
+        or empty tuple.
+
+        :return: a tuple of string revision numbers.
+
+        """
+        self._revision_map
+        return self.bases
+
+    @util.memoized_property
+    def _real_heads(self) -> Tuple[str, ...]:
+        """All "real" head revisions as strings.
+
+        :return: a tuple of string revision numbers.
+
+        """
+        self._revision_map
+        return self._real_heads
+
+    @util.memoized_property
+    def _real_bases(self) -> Tuple[str, ...]:
+        """All "real" base revisions as strings.
+
+        :return: a tuple of string revision numbers.
+
+        """
+        self._revision_map
+        return self._real_bases
+
+    @util.memoized_property
+    def _revision_map(self) -> _RevisionMapType:
+        """memoized attribute, initializes the revision map from the
+        initial collection.
+
+        """
+        # Ordering required for some tests to pass (but not required in
+        # general)
+        map_: _InterimRevisionMapType = sqlautil.OrderedDict()
+
+        heads: Set[Revision] = sqlautil.OrderedSet()
+        _real_heads: Set[Revision] = sqlautil.OrderedSet()
+        bases: Tuple[Revision, ...] = ()
+        _real_bases: Tuple[Revision, ...] = ()
+
+        has_branch_labels = set()
+        all_revisions = set()
+
+        for revision in self._generator():
+            all_revisions.add(revision)
+
+            if revision.revision in map_:
+                util.warn(
+                    "Revision %s is present more than once" % revision.revision
+                )
+            map_[revision.revision] = revision
+            if revision.branch_labels:
+                has_branch_labels.add(revision)
+
+            heads.add(revision)
+            _real_heads.add(revision)
+            if revision.is_base:
+                bases += (revision,)
+            if revision._is_real_base:
+                _real_bases += (revision,)
+
+        # add the branch_labels to the map_.  We'll need these
+        # to resolve the dependencies.
+        rev_map = map_.copy()
+        self._map_branch_labels(
+            has_branch_labels, cast(_RevisionMapType, map_)
+        )
+
+        # resolve dependency names from branch labels and symbolic
+        # names
+        self._add_depends_on(all_revisions, cast(_RevisionMapType, map_))
+
+        for rev in map_.values():
+            for downrev in rev._all_down_revisions:
+                if downrev not in map_:
+                    util.warn(
+                        "Revision %s referenced from %s is not present"
+                        % (downrev, rev)
+                    )
+                down_revision = map_[downrev]
+                down_revision.add_nextrev(rev)
+                if downrev in rev._versioned_down_revisions:
+                    heads.discard(down_revision)
+                _real_heads.discard(down_revision)
+
+        # once the map has downrevisions populated, the dependencies
+        # can be further refined to include only those which are not
+        # already ancestors
+        self._normalize_depends_on(all_revisions, cast(_RevisionMapType, map_))
+        self._detect_cycles(rev_map, heads, bases, _real_heads, _real_bases)
+
+        revision_map: _RevisionMapType = dict(map_.items())
+        revision_map[None] = revision_map[()] = None
+        self.heads = tuple(rev.revision for rev in heads)
+        self._real_heads = tuple(rev.revision for rev in _real_heads)
+        self.bases = tuple(rev.revision for rev in bases)
+        self._real_bases = tuple(rev.revision for rev in _real_bases)
+
+        self._add_branches(has_branch_labels, revision_map)
+        return revision_map
+
+    def _detect_cycles(
+        self,
+        rev_map: _InterimRevisionMapType,
+        heads: Set[Revision],
+        bases: Tuple[Revision, ...],
+        _real_heads: Set[Revision],
+        _real_bases: Tuple[Revision, ...],
+    ) -> None:
+        if not rev_map:
+            return
+        if not heads or not bases:
+            raise CycleDetected(list(rev_map))
+        total_space = {
+            rev.revision
+            for rev in self._iterate_related_revisions(
+                lambda r: r._versioned_down_revisions,
+                heads,
+                map_=cast(_RevisionMapType, rev_map),
+            )
+        }.intersection(
+            rev.revision
+            for rev in self._iterate_related_revisions(
+                lambda r: r.nextrev,
+                bases,
+                map_=cast(_RevisionMapType, rev_map),
+            )
+        )
+        deleted_revs = set(rev_map.keys()) - total_space
+        if deleted_revs:
+            raise CycleDetected(sorted(deleted_revs))
+
+        if not _real_heads or not _real_bases:
+            raise DependencyCycleDetected(list(rev_map))
+        total_space = {
+            rev.revision
+            for rev in self._iterate_related_revisions(
+                lambda r: r._all_down_revisions,
+                _real_heads,
+                map_=cast(_RevisionMapType, rev_map),
+            )
+        }.intersection(
+            rev.revision
+            for rev in self._iterate_related_revisions(
+                lambda r: r._all_nextrev,
+                _real_bases,
+                map_=cast(_RevisionMapType, rev_map),
+            )
+        )
+        deleted_revs = set(rev_map.keys()) - total_space
+        if deleted_revs:
+            raise DependencyCycleDetected(sorted(deleted_revs))
+
+    def _map_branch_labels(
+        self, revisions: Collection[Revision], map_: _RevisionMapType
+    ) -> None:
+        for revision in revisions:
+            if revision.branch_labels:
+                assert revision._orig_branch_labels is not None
+                for branch_label in revision._orig_branch_labels:
+                    if branch_label in map_:
+                        map_rev = map_[branch_label]
+                        assert map_rev is not None
+                        raise RevisionError(
+                            "Branch name '%s' in revision %s already "
+                            "used by revision %s"
+                            % (
+                                branch_label,
+                                revision.revision,
+                                map_rev.revision,
+                            )
+                        )
+                    map_[branch_label] = revision
+
+    def _add_branches(
+        self, revisions: Collection[Revision], map_: _RevisionMapType
+    ) -> None:
+        for revision in revisions:
+            if revision.branch_labels:
+                revision.branch_labels.update(revision.branch_labels)
+                for node in self._get_descendant_nodes(
+                    [revision], map_, include_dependencies=False
+                ):
+                    node.branch_labels.update(revision.branch_labels)
+
+                parent = node
+                while (
+                    parent
+                    and not parent._is_real_branch_point
+                    and not parent.is_merge_point
+                ):
+                    parent.branch_labels.update(revision.branch_labels)
+                    if parent.down_revision:
+                        parent = map_[parent.down_revision]
+                    else:
+                        break
+
+    def _add_depends_on(
+        self, revisions: Collection[Revision], map_: _RevisionMapType
+    ) -> None:
+        """Resolve the 'dependencies' for each revision in a collection
+        in terms of actual revision ids, as opposed to branch labels or other
+        symbolic names.
+
+        The collection is then assigned to the _resolved_dependencies
+        attribute on each revision object.
+
+        """
+
+        for revision in revisions:
+            if revision.dependencies:
+                deps = [
+                    map_[dep] for dep in util.to_tuple(revision.dependencies)
+                ]
+                revision._resolved_dependencies = tuple(
+                    [d.revision for d in deps if d is not None]
+                )
+            else:
+                revision._resolved_dependencies = ()
+
+    def _normalize_depends_on(
+        self, revisions: Collection[Revision], map_: _RevisionMapType
+    ) -> None:
+        """Create a collection of "dependencies" that omits dependencies
+        that are already ancestor nodes for each revision in a given
+        collection.
+
+        This builds upon the _resolved_dependencies collection created in the
+        _add_depends_on() method, looking in the fully populated revision map
+        for ancestors, and omitting them as the _resolved_dependencies
+        collection as it is copied to a new collection. The new collection is
+        then assigned to the _normalized_resolved_dependencies attribute on
+        each revision object.
+
+        The collection is then used to determine the immediate "down revision"
+        identifiers for this revision.
+
+        """
+
+        for revision in revisions:
+            if revision._resolved_dependencies:
+                normalized_resolved = set(revision._resolved_dependencies)
+                for rev in self._get_ancestor_nodes(
+                    [revision],
+                    include_dependencies=False,
+                    map_=map_,
+                ):
+                    if rev is revision:
+                        continue
+                    elif rev._resolved_dependencies:
+                        normalized_resolved.difference_update(
+                            rev._resolved_dependencies
+                        )
+
+                revision._normalized_resolved_dependencies = tuple(
+                    normalized_resolved
+                )
+            else:
+                revision._normalized_resolved_dependencies = ()
+
+    def add_revision(self, revision: Revision, _replace: bool = False) -> None:
+        """add a single revision to an existing map.
+
+        This method is intended for single-revision use cases; it's not
+        appropriate for fully populating an entire revision map.
+
+        """
+        map_ = self._revision_map
+        if not _replace and revision.revision in map_:
+            util.warn(
+                "Revision %s is present more than once" % revision.revision
+            )
+        elif _replace and revision.revision not in map_:
+            raise Exception("revision %s not in map" % revision.revision)
+
+        map_[revision.revision] = revision
+
+        revisions = [revision]
+        self._add_branches(revisions, map_)
+        self._map_branch_labels(revisions, map_)
+        self._add_depends_on(revisions, map_)
+
+        if revision.is_base:
+            self.bases += (revision.revision,)
+        if revision._is_real_base:
+            self._real_bases += (revision.revision,)
+
+        for downrev in revision._all_down_revisions:
+            if downrev not in map_:
+                util.warn(
+                    "Revision %s referenced from %s is not present"
+                    % (downrev, revision)
+                )
+            not_none(map_[downrev]).add_nextrev(revision)
+
+        self._normalize_depends_on(revisions, map_)
+
+        if revision._is_real_head:
+            self._real_heads = tuple(
+                head
+                for head in self._real_heads
+                if head
+                not in set(revision._all_down_revisions).union(
+                    [revision.revision]
+                )
+            ) + (revision.revision,)
+        if revision.is_head:
+            self.heads = tuple(
+                head
+                for head in self.heads
+                if head
+                not in set(revision._versioned_down_revisions).union(
+                    [revision.revision]
+                )
+            ) + (revision.revision,)
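+
+    # Illustrative single-revision use (a sketch; the id is hypothetical):
+    #
+    #     rmap.add_revision(Revision("f9e8d7c6", rmap.get_current_head()))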
+
+    def get_current_head(
+        self, branch_label: Optional[str] = None
+    ) -> Optional[str]:
+        """Return the current head revision.
+
+        If the script directory has multiple heads
+        due to branching, an error is raised;
+        :meth:`.ScriptDirectory.get_heads` should be
+        preferred.
+
+        :param branch_label: optional branch name which will limit the
+         heads considered to those which include that branch_label.
+
+        :return: a string revision number.
+
+        .. seealso::
+
+            :meth:`.ScriptDirectory.get_heads`
+
+        """
+        current_heads: Sequence[str] = self.heads
+        if branch_label:
+            current_heads = self.filter_for_lineage(
+                current_heads, branch_label
+            )
+        if len(current_heads) > 1:
+            raise MultipleHeads(
+                current_heads,
+                "%s@head" % branch_label if branch_label else "head",
+            )
+
+        if current_heads:
+            return current_heads[0]
+        else:
+            return None
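+
+    # A minimal usage sketch (illustrative only; ids are hypothetical and
+    # ``Revision`` construction is simplified):
+    #
+    #     rmap = RevisionMap(lambda: [Revision("a1b2c3d4", None),
+    #                                 Revision("e5f6a7b8", "a1b2c3d4")])
+    #     assert rmap.get_current_head() == "e5f6a7b8"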
+
+    def _get_base_revisions(self, identifier: str) -> Tuple[str, ...]:
+        return self.filter_for_lineage(self.bases, identifier)
+
+    def get_revisions(
+        self, id_: Optional[_GetRevArg]
+    ) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """Return the :class:`.Revision` instances with the given rev id
+        or identifiers.
+
+        May be given a single identifier, a sequence of identifiers, or the
+        special symbols "head" or "base".  The result is a tuple of one
+        or more :class:`.Revision` objects, or an empty tuple in the
+        case of "base".
+
+        In the case where 'head' or 'heads' is requested and the
+        revision map is empty, returns an empty tuple.
+
+        Supports partial identifiers, where the given identifier
+        is matched against all identifiers that start with the given
+        characters; if there is exactly one match, that determines the
+        full revision.
+
+        """
+
+        if isinstance(id_, (list, tuple, set, frozenset)):
+            return sum([self.get_revisions(id_elem) for id_elem in id_], ())
+        else:
+            resolved_id, branch_label = self._resolve_revision_number(id_)
+            if len(resolved_id) == 1:
+                try:
+                    rint = int(resolved_id[0])
+                    if rint < 0:
+                        # branch@-n -> walk down from heads
+                        select_heads = self.get_revisions("heads")
+                        if branch_label is not None:
+                            select_heads = tuple(
+                                head
+                                for head in select_heads
+                                if branch_label
+                                in is_revision(head).branch_labels
+                            )
+                        return tuple(
+                            self._walk(head, steps=rint)
+                            for head in select_heads
+                        )
+                except ValueError:
+                    # couldn't resolve as integer
+                    pass
+            return tuple(
+                self._revision_for_ident(rev_id, branch_label)
+                for rev_id in resolved_id
+            )
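+
+    # Illustrative sketch (ids hypothetical): with "ae1027a6acf3" in the
+    # map, a unique partial prefix resolves to the full revision:
+    #
+    #     rmap.get_revisions("ae10")   # -> (Revision("ae1027a6acf3", ...),)
+    #     rmap.get_revisions("heads")  # -> tuple of all current heads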
+
+    def get_revision(self, id_: Optional[str]) -> Optional[Revision]:
+        """Return the :class:`.Revision` instance with the given rev id.
+
+        If a symbolic name such as "head" or "base" is given, resolves
+        the identifier into the current head or base revision.  If the
+        symbolic name resolves to multiple revisions,
+        :class:`.MultipleHeads` is raised.
+
+        Supports partial identifiers, where the given identifier
+        is matched against all identifiers that start with the given
+        characters; if there is exactly one match, that determines the
+        full revision.
+
+        """
+
+        resolved_id, branch_label = self._resolve_revision_number(id_)
+        if len(resolved_id) > 1:
+            raise MultipleHeads(resolved_id, id_)
+
+        resolved: Union[str, Tuple[()]] = resolved_id[0] if resolved_id else ()
+        return self._revision_for_ident(resolved, branch_label)
+
+    def _resolve_branch(self, branch_label: str) -> Optional[Revision]:
+        try:
+            branch_rev = self._revision_map[branch_label]
+        except KeyError:
+            try:
+                nonbranch_rev = self._revision_for_ident(branch_label)
+            except ResolutionError as re:
+                raise ResolutionError(
+                    "No such branch: '%s'" % branch_label, branch_label
+                ) from re
+
+            else:
+                return nonbranch_rev
+        else:
+            return branch_rev
+
+    def _revision_for_ident(
+        self,
+        resolved_id: Union[str, Tuple[()], None],
+        check_branch: Optional[str] = None,
+    ) -> Optional[Revision]:
+        branch_rev: Optional[Revision]
+        if check_branch:
+            branch_rev = self._resolve_branch(check_branch)
+        else:
+            branch_rev = None
+
+        revision: Union[Optional[Revision], Literal[False]]
+        try:
+            revision = self._revision_map[resolved_id]
+        except KeyError:
+            # break out to avoid misleading py3k stack traces
+            revision = False
+        revs: Sequence[str]
+        if revision is False:
+            assert resolved_id
+            # do a partial lookup
+            revs = [
+                x
+                for x in self._revision_map
+                if x and len(x) > 3 and x.startswith(resolved_id)
+            ]
+
+            if branch_rev:
+                revs = self.filter_for_lineage(revs, check_branch)
+            if not revs:
+                raise ResolutionError(
+                    "No such revision or branch '%s'%s"
+                    % (
+                        resolved_id,
+                        (
+                            "; please ensure at least four characters are "
+                            "present for partial revision identifier matches"
+                            if len(resolved_id) < 4
+                            else ""
+                        ),
+                    ),
+                    resolved_id,
+                )
+            elif len(revs) > 1:
+                raise ResolutionError(
+                    "Multiple revisions start "
+                    "with '%s': %s..."
+                    % (resolved_id, ", ".join("'%s'" % r for r in revs[0:3])),
+                    resolved_id,
+                )
+            else:
+                revision = self._revision_map[revs[0]]
+
+        if check_branch and revision is not None:
+            assert branch_rev is not None
+            assert resolved_id
+            if not self._shares_lineage(
+                revision.revision, branch_rev.revision
+            ):
+                raise ResolutionError(
+                    "Revision %s is not a member of branch '%s'"
+                    % (revision.revision, check_branch),
+                    resolved_id,
+                )
+        return revision
+
+    def _filter_into_branch_heads(
+        self, targets: Iterable[Optional[_RevisionOrBase]]
+    ) -> Set[Optional[_RevisionOrBase]]:
+        targets = set(targets)
+
+        for rev in list(targets):
+            assert rev
+            if targets.intersection(
+                self._get_descendant_nodes([rev], include_dependencies=False)
+            ).difference([rev]):
+                targets.discard(rev)
+        return targets
+
+    def filter_for_lineage(
+        self,
+        targets: Iterable[_TR],
+        check_against: Optional[str],
+        include_dependencies: bool = False,
+    ) -> Tuple[_TR, ...]:
+        id_, branch_label = self._resolve_revision_number(check_against)
+
+        shares = []
+        if branch_label:
+            shares.append(branch_label)
+        if id_:
+            shares.extend(id_)
+
+        return tuple(
+            tg
+            for tg in targets
+            if self._shares_lineage(
+                tg, shares, include_dependencies=include_dependencies
+            )
+        )
+
+    def _shares_lineage(
+        self,
+        target: Optional[_RevisionOrStr],
+        test_against_revs: Sequence[_RevisionOrStr],
+        include_dependencies: bool = False,
+    ) -> bool:
+        if not test_against_revs:
+            return True
+        if not isinstance(target, Revision):
+            resolved_target = not_none(self._revision_for_ident(target))
+        else:
+            resolved_target = target
+
+        resolved_test_against_revs = [
+            self._revision_for_ident(test_against_rev)
+            if not isinstance(test_against_rev, Revision)
+            else test_against_rev
+            for test_against_rev in util.to_tuple(
+                test_against_revs, default=()
+            )
+        ]
+
+        return bool(
+            set(
+                self._get_descendant_nodes(
+                    [resolved_target],
+                    include_dependencies=include_dependencies,
+                )
+            )
+            .union(
+                self._get_ancestor_nodes(
+                    [resolved_target],
+                    include_dependencies=include_dependencies,
+                )
+            )
+            .intersection(resolved_test_against_revs)
+        )
+
+    def _resolve_revision_number(
+        self, id_: Optional[_GetRevArg]
+    ) -> Tuple[Tuple[str, ...], Optional[str]]:
+        branch_label: Optional[str]
+        if isinstance(id_, str) and "@" in id_:
+            branch_label, id_ = id_.split("@", 1)
+
+        elif id_ is not None and (
+            (isinstance(id_, tuple) and id_ and not isinstance(id_[0], str))
+            or not isinstance(id_, (str, tuple))
+        ):
+            raise RevisionError(
+                "revision identifier %r is not a string; ensure database "
+                "driver settings are correct" % (id_,)
+            )
+
+        else:
+            branch_label = None
+
+        # ensure map is loaded
+        self._revision_map
+        if id_ == "heads":
+            if branch_label:
+                return (
+                    self.filter_for_lineage(self.heads, branch_label),
+                    branch_label,
+                )
+            else:
+                return self._real_heads, branch_label
+        elif id_ == "head":
+            current_head = self.get_current_head(branch_label)
+            if current_head:
+                return (current_head,), branch_label
+            else:
+                return (), branch_label
+        elif id_ == "base" or id_ is None:
+            return (), branch_label
+        else:
+            return util.to_tuple(id_, default=None), branch_label
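+
+    # Illustrative resolutions (labels and ids hypothetical):
+    #
+    #     "mybranch@head" -> ((head of mybranch,), "mybranch")
+    #     "heads"         -> (self._real_heads, None)
+    #     "base" or None  -> ((), None)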
+
+    def iterate_revisions(
+        self,
+        upper: _RevisionIdentifierType,
+        lower: _RevisionIdentifierType,
+        implicit_base: bool = False,
+        inclusive: bool = False,
+        assert_relative_length: bool = True,
+        select_for_downgrade: bool = False,
+    ) -> Iterator[Revision]:
+        """Iterate through script revisions, starting at the given
+        upper revision identifier and ending at the lower.
+
+        The traversal uses strictly the `down_revision`
+        marker inside each migration script, so it is a requirement
+        that upper >= lower; otherwise nothing is returned.
+
+        The iterator yields :class:`.Revision` objects.
+
+        """
+        fn: _CollectRevisionsProtocol
+        if select_for_downgrade:
+            fn = self._collect_downgrade_revisions
+        else:
+            fn = self._collect_upgrade_revisions
+
+        revisions, heads = fn(
+            upper,
+            lower,
+            inclusive=inclusive,
+            implicit_base=implicit_base,
+            assert_relative_length=assert_relative_length,
+        )
+
+        for node in self._topological_sort(revisions, heads):
+            yield not_none(self.get_revision(node))
+
+    def _get_descendant_nodes(
+        self,
+        targets: Collection[Optional[_RevisionOrBase]],
+        map_: Optional[_RevisionMapType] = None,
+        check: bool = False,
+        omit_immediate_dependencies: bool = False,
+        include_dependencies: bool = True,
+    ) -> Iterator[Any]:
+        if omit_immediate_dependencies:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                if rev not in targets:
+                    return rev._all_nextrev
+                else:
+                    return rev.nextrev
+
+        elif include_dependencies:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                return rev._all_nextrev
+
+        else:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                return rev.nextrev
+
+        return self._iterate_related_revisions(
+            fn, targets, map_=map_, check=check
+        )
+
+    def _get_ancestor_nodes(
+        self,
+        targets: Collection[Optional[_RevisionOrBase]],
+        map_: Optional[_RevisionMapType] = None,
+        check: bool = False,
+        include_dependencies: bool = True,
+    ) -> Iterator[Revision]:
+        if include_dependencies:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                return rev._normalized_down_revisions
+
+        else:
+
+            def fn(rev: Revision) -> Iterable[str]:
+                return rev._versioned_down_revisions
+
+        return self._iterate_related_revisions(
+            fn, targets, map_=map_, check=check
+        )
+
+    def _iterate_related_revisions(
+        self,
+        fn: Callable[[Revision], Iterable[str]],
+        targets: Collection[Optional[_RevisionOrBase]],
+        map_: Optional[_RevisionMapType],
+        check: bool = False,
+    ) -> Iterator[Revision]:
+        if map_ is None:
+            map_ = self._revision_map
+
+        seen = set()
+        todo: Deque[Revision] = collections.deque()
+        for target_for in targets:
+            target = is_revision(target_for)
+            todo.append(target)
+            if check:
+                per_target = set()
+
+            while todo:
+                rev = todo.pop()
+                if check:
+                    per_target.add(rev)
+
+                if rev in seen:
+                    continue
+                seen.add(rev)
+                # Check for map errors before collecting.
+                for rev_id in fn(rev):
+                    next_rev = map_[rev_id]
+                    assert next_rev is not None
+                    if next_rev.revision != rev_id:
+                        raise RevisionError(
+                            "Dependency resolution failed; broken map"
+                        )
+                    todo.append(next_rev)
+                yield rev
+            if check:
+                overlaps = per_target.intersection(targets).difference(
+                    [target]
+                )
+                if overlaps:
+                    raise RevisionError(
+                        "Requested revision %s overlaps with "
+                        "other requested revisions %s"
+                        % (
+                            target.revision,
+                            ", ".join(r.revision for r in overlaps),
+                        )
+                    )
+
+    def _topological_sort(
+        self,
+        revisions: Collection[Revision],
+        heads: Any,
+    ) -> List[str]:
+        """Yield revision ids of a collection of Revision objects in
+        topological sorted order (i.e. revisions always come after their
+        down_revisions and dependencies). Uses the order of keys in
+        _revision_map to sort.
+
+        """
+
+        id_to_rev = self._revision_map
+
+        def get_ancestors(rev_id: str) -> Set[str]:
+            return {
+                r.revision
+                for r in self._get_ancestor_nodes([id_to_rev[rev_id]])
+            }
+
+        todo = {d.revision for d in revisions}
+
+        # Use revision map (ordered dict) key order to pre-sort.
+        inserted_order = list(self._revision_map)
+
+        current_heads = list(
+            sorted(
+                {d.revision for d in heads if d.revision in todo},
+                key=inserted_order.index,
+            )
+        )
+        ancestors_by_idx = [get_ancestors(rev_id) for rev_id in current_heads]
+
+        output = []
+
+        current_candidate_idx = 0
+        while current_heads:
+            candidate = current_heads[current_candidate_idx]
+
+            for check_head_index, ancestors in enumerate(ancestors_by_idx):
+                # scan all the heads.  see if we can continue walking
+                # down the current branch indicated by current_candidate_idx.
+                if (
+                    check_head_index != current_candidate_idx
+                    and candidate in ancestors
+                ):
+                    current_candidate_idx = check_head_index
+                    # nope, another head is dependent on us; it has
+                    # to be traversed first
+                    break
+            else:
+                # yup, we can emit
+                if candidate in todo:
+                    output.append(candidate)
+                    todo.remove(candidate)
+
+                # now update the heads with our ancestors.
+
+                candidate_rev = id_to_rev[candidate]
+                assert candidate_rev is not None
+
+                heads_to_add = [
+                    r
+                    for r in candidate_rev._normalized_down_revisions
+                    if r in todo and r not in current_heads
+                ]
+
+                if not heads_to_add:
+                    # no ancestors, so remove this head from the list
+                    del current_heads[current_candidate_idx]
+                    del ancestors_by_idx[current_candidate_idx]
+                    current_candidate_idx = max(current_candidate_idx - 1, 0)
+                else:
+                    if (
+                        not candidate_rev._normalized_resolved_dependencies
+                        and len(candidate_rev._versioned_down_revisions) == 1
+                    ):
+                        current_heads[current_candidate_idx] = heads_to_add[0]
+
+                        # for plain movement down a revision line without
+                        # any mergepoints, branchpoints, or deps, we
+                        # can update the ancestors collection directly
+                        # by popping out the candidate we just emitted
+                        ancestors_by_idx[current_candidate_idx].discard(
+                            candidate
+                        )
+
+                    else:
+                        # otherwise recalculate it again, things get
+                        # complicated otherwise.  This can possibly be
+                        # improved to not run the whole ancestor thing
+                        # each time but it was getting complicated
+                        current_heads[current_candidate_idx] = heads_to_add[0]
+                        current_heads.extend(heads_to_add[1:])
+                        ancestors_by_idx[
+                            current_candidate_idx
+                        ] = get_ancestors(heads_to_add[0])
+                        ancestors_by_idx.extend(
+                            get_ancestors(head) for head in heads_to_add[1:]
+                        )
+
+        assert not todo
+        return output
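+
+    # Ordering sketch (hypothetical chain a1b2 <- c3d4 <- e5f6, head e5f6):
+    # sorting {a1b2, c3d4, e5f6} against heads={e5f6} emits
+    # ["e5f6", "c3d4", "a1b2"], i.e. upper to lower; upgrade paths
+    # reverse this list before applying.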
+
+    def _walk(
+        self,
+        start: Optional[Union[str, Revision]],
+        steps: int,
+        branch_label: Optional[str] = None,
+        no_overwalk: bool = True,
+    ) -> Optional[_RevisionOrBase]:
+        """
+        Walk the requested number of :steps up (steps > 0) or down (steps < 0)
+        the revision tree.
+
+        :branch_label is used to select branches only when walking up.
+
+        If the walk goes past the boundaries of the tree and :no_overwalk is
+        True, None is returned; otherwise the walk terminates early.
+
+        A RevisionError is raised if there is no unambiguous revision to
+        walk to.
+        """
+        initial: Optional[_RevisionOrBase]
+        if isinstance(start, str):
+            initial = self.get_revision(start)
+        else:
+            initial = start
+
+        children: Sequence[Optional[_RevisionOrBase]]
+        for _ in range(abs(steps)):
+            if steps > 0:
+                assert initial != "base"  # type: ignore[comparison-overlap]
+                # Walk up
+                walk_up = [
+                    is_revision(rev)
+                    for rev in self.get_revisions(
+                        self.bases if initial is None else initial.nextrev
+                    )
+                ]
+                if branch_label:
+                    children = self.filter_for_lineage(walk_up, branch_label)
+                else:
+                    children = walk_up
+            else:
+                # Walk down
+                if initial == "base":  # type: ignore[comparison-overlap]
+                    children = ()
+                else:
+                    children = self.get_revisions(
+                        self.heads
+                        if initial is None
+                        else initial.down_revision
+                    )
+                    if not children:
+                        children = ("base",)
+            if not children:
+                # This will return an invalid result if no_overwalk, otherwise
+                # further steps will stay where we are.
+                ret = None if no_overwalk else initial
+                return ret
+            elif len(children) > 1:
+                raise RevisionError("Ambiguous walk")
+            initial = children[0]
+
+        return initial
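+
+    # Walk sketch (hypothetical chain a1b2 <- c3d4 <- e5f6):
+    #
+    #     rmap._walk("a1b2", steps=2)   # -> the "e5f6" Revision
+    #     rmap._walk("c3d4", steps=-1)  # -> the "a1b2" Revision
+    #     rmap._walk("a1b2", steps=-2)  # -> None (overwalked past "base")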
+
+    def _parse_downgrade_target(
+        self,
+        current_revisions: _RevisionIdentifierType,
+        target: _RevisionIdentifierType,
+        assert_relative_length: bool,
+    ) -> Tuple[Optional[str], Optional[_RevisionOrBase]]:
+        """
+        Parse downgrade command syntax :target to retrieve the target revision
+        and branch label (if any) given the :current_revisions stamp of the
+        database.
+
+        Returns a tuple (branch_label, target_revision) where branch_label
+        is a string from the command specifying the branch to consider (or
+        None if no branch given), and target_revision is a Revision object
+        which the command refers to. target_revision is None if the command
+        refers to 'base'. The target may be specified in absolute form, or
+        relative to :current_revisions.
+        """
+        if target is None:
+            return None, None
+        assert isinstance(
+            target, str
+        ), "Expected downgrade target in string form"
+        match = _relative_destination.match(target)
+        if match:
+            branch_label, symbol, relative = match.groups()
+            rel_int = int(relative)
+            if rel_int >= 0:
+                if symbol is None:
+                    # Downgrading to current + n is not valid.
+                    raise RevisionError(
+                        "Relative revision %s didn't "
+                        "produce %d migrations" % (relative, abs(rel_int))
+                    )
+                # Find target revision relative to given symbol.
+                rev = self._walk(
+                    symbol,
+                    rel_int,
+                    branch_label,
+                    no_overwalk=assert_relative_length,
+                )
+                if rev is None:
+                    raise RevisionError("Walked too far")
+                return branch_label, rev
+            else:
+                relative_revision = symbol is None
+                if relative_revision:
+                    # Find target revision relative to current state.
+                    if branch_label:
+                        cr_tuple = util.to_tuple(current_revisions)
+                        symbol_list: Sequence[str]
+                        symbol_list = self.filter_for_lineage(
+                            cr_tuple, branch_label
+                        )
+                        if not symbol_list:
+                            # check the case where there are multiple branches
+                            # but there is currently a single head, since all
+                            # other branch heads are dependent on the current
+                            # single head.
+                            all_current = cast(
+                                Set[Revision], self._get_all_current(cr_tuple)
+                            )
+                            sl_all_current = self.filter_for_lineage(
+                                all_current, branch_label
+                            )
+                            symbol_list = [
+                                r.revision if r else r  # type: ignore[misc]
+                                for r in sl_all_current
+                            ]
+
+                        assert len(symbol_list) == 1
+                        symbol = symbol_list[0]
+                    else:
+                        current_revisions = util.to_tuple(current_revisions)
+                        if not current_revisions:
+                            raise RevisionError(
+                                "Relative revision %s didn't "
+                                "produce %d migrations"
+                                % (relative, abs(rel_int))
+                            )
+                        # Have to check uniques here for duplicate rows test.
+                        if len(set(current_revisions)) > 1:
+                            util.warn(
+                                "downgrade -1 from multiple heads is "
+                                "ambiguous; "
+                                "this usage will be disallowed in a future "
+                                "release."
+                            )
+                        symbol = current_revisions[0]
+                        # Restrict iteration to just the selected branch when
+                        # ambiguous branches are involved.
+                        branch_label = symbol
+                # Walk down the tree to find downgrade target.
+                rev = self._walk(
+                    start=self.get_revision(symbol)
+                    if branch_label is None
+                    else self.get_revision("%s@%s" % (branch_label, symbol)),
+                    steps=rel_int,
+                    no_overwalk=assert_relative_length,
+                )
+                if rev is None:
+                    if relative_revision:
+                        raise RevisionError(
+                            "Relative revision %s didn't "
+                            "produce %d migrations" % (relative, abs(rel_int))
+                        )
+                    else:
+                        raise RevisionError("Walked too far")
+                return branch_label, rev
+
+        # No relative destination given, revision specified is absolute.
+        branch_label, _, symbol = target.rpartition("@")
+        if not branch_label:
+            branch_label = None
+        return branch_label, self.get_revision(symbol)
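+
+    # Illustrative downgrade targets (names hypothetical):
+    #
+    #     "-1"           -> one step down from the current revision
+    #     "mybranch@-2"  -> two steps down along "mybranch"
+    #     "ae1027a6acf3" -> absolute: exactly that revision
+    #     "base"         -> (None, None), i.e. downgrade everything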
+
+    def _parse_upgrade_target(
+        self,
+        current_revisions: _RevisionIdentifierType,
+        target: _RevisionIdentifierType,
+        assert_relative_length: bool,
+    ) -> Tuple[Optional[_RevisionOrBase], ...]:
+        """
+        Parse upgrade command syntax :target to retrieve the target
+        revision, given the :current_revisions stamp of the database.
+
+        Returns a tuple of Revision objects which should be iterated/upgraded
+        to. The target may be specified in absolute form, or relative to
+        :current_revisions.
+        """
+        if isinstance(target, str):
+            match = _relative_destination.match(target)
+        else:
+            match = None
+
+        if not match:
+            # No relative destination, target is absolute.
+            return self.get_revisions(target)
+
+        current_revisions_tup: Union[str, Tuple[Optional[str], ...], None]
+        current_revisions_tup = util.to_tuple(current_revisions)
+
+        branch_label, symbol, relative_str = match.groups()
+        relative = int(relative_str)
+        if relative > 0:
+            if symbol is None:
+                if not current_revisions_tup:
+                    current_revisions_tup = (None,)
+                # Try to filter to a single target (avoid ambiguous branches).
+                start_revs = current_revisions_tup
+                if branch_label:
+                    start_revs = self.filter_for_lineage(
+                        self.get_revisions(current_revisions_tup),  # type: ignore[arg-type] # noqa: E501
+                        branch_label,
+                    )
+                    if not start_revs:
+                        # The requested branch is not a head, so we need to
+                        # backtrack to find a branchpoint.
+                        active_on_branch = self.filter_for_lineage(
+                            self._get_ancestor_nodes(
+                                self.get_revisions(current_revisions_tup)
+                            ),
+                            branch_label,
+                        )
+                        # Find the tips of this set of revisions (revisions
+                        # without children within the set).
+                        start_revs = tuple(
+                            {rev.revision for rev in active_on_branch}
+                            - {
+                                down
+                                for rev in active_on_branch
+                                for down in rev._normalized_down_revisions
+                            }
+                        )
+                        if not start_revs:
+                            # We must need to go right back to base to find
+                            # a starting point for this branch.
+                            start_revs = (None,)
+                if len(start_revs) > 1:
+                    raise RevisionError(
+                        "Ambiguous upgrade from multiple current revisions"
+                    )
+                # Walk up from unique target revision.
+                rev = self._walk(
+                    start=start_revs[0],
+                    steps=relative,
+                    branch_label=branch_label,
+                    no_overwalk=assert_relative_length,
+                )
+                if rev is None:
+                    raise RevisionError(
+                        "Relative revision %s didn't "
+                        "produce %d migrations" % (relative_str, abs(relative))
+                    )
+                return (rev,)
+            else:
+                # Walk is relative to a given revision, not the current state.
+                return (
+                    self._walk(
+                        start=self.get_revision(symbol),
+                        steps=relative,
+                        branch_label=branch_label,
+                        no_overwalk=assert_relative_length,
+                    ),
+                )
+        else:
+            if symbol is None:
+                # Upgrading to current - n is not valid.
+                raise RevisionError(
+                    "Relative revision %s didn't "
+                    "produce %d migrations" % (relative, abs(relative))
+                )
+            return (
+                self._walk(
+                    start=self.get_revision(symbol)
+                    if branch_label is None
+                    else self.get_revision("%s@%s" % (branch_label, symbol)),
+                    steps=relative,
+                    no_overwalk=assert_relative_length,
+                ),
+            )
+
+    def _collect_downgrade_revisions(
+        self,
+        upper: _RevisionIdentifierType,
+        lower: _RevisionIdentifierType,
+        inclusive: bool,
+        implicit_base: bool,
+        assert_relative_length: bool,
+    ) -> Tuple[Set[Revision], Tuple[Optional[_RevisionOrBase], ...]]:
+        """
+        Compute the set of current revisions specified by :upper, and the
+        downgrade target specified by :lower. Return all dependents of the
+        target which are currently active.
+
+        :inclusive=True includes the target revision in the set
+        """
+
+        branch_label, target_revision = self._parse_downgrade_target(
+            current_revisions=upper,
+            target=lower,
+            assert_relative_length=assert_relative_length,
+        )
+        if target_revision == "base":
+            target_revision = None
+        assert target_revision is None or isinstance(target_revision, Revision)
+
+        roots: List[Revision]
+        # Find candidates to drop.
+        if target_revision is None:
+            # Downgrading back to base: find all tree roots.
+            roots = [
+                rev
+                for rev in self._revision_map.values()
+                if rev is not None and rev.down_revision is None
+            ]
+        elif inclusive:
+            # inclusive implies target revision should also be dropped
+            roots = [target_revision]
+        else:
+            # Downgrading to fixed target: find all direct children.
+            roots = [
+                is_revision(rev)
+                for rev in self.get_revisions(target_revision.nextrev)
+            ]
+
+        if branch_label and len(roots) > 1:
+            # Need to filter roots.
+            ancestors = {
+                rev.revision
+                for rev in self._get_ancestor_nodes(
+                    [self._resolve_branch(branch_label)],
+                    include_dependencies=False,
+                )
+            }
+            # Intersection gives the root revisions we are trying to
+            # rollback with the downgrade.
+            roots = [
+                is_revision(rev)
+                for rev in self.get_revisions(
+                    {rev.revision for rev in roots}.intersection(ancestors)
+                )
+            ]
+
+            # Ensure we didn't throw everything away when filtering branches.
+            if len(roots) == 0:
+                raise RevisionError(
+                    "Not a valid downgrade target from current heads"
+                )
+
+        heads = self.get_revisions(upper)
+
+        # Aim is to drop the selected roots; to do so we also need to drop
+        # their descendants and anything dependent on them.
+        downgrade_revisions = set(
+            self._get_descendant_nodes(
+                roots,
+                include_dependencies=True,
+                omit_immediate_dependencies=False,
+            )
+        )
+        active_revisions = set(
+            self._get_ancestor_nodes(heads, include_dependencies=True)
+        )
+
+        # Emit revisions to drop in reverse topological sorted order.
+        downgrade_revisions.intersection_update(active_revisions)
+
+        if implicit_base:
+            # Wind other branches back to base.
+            downgrade_revisions.update(
+                active_revisions.difference(self._get_ancestor_nodes(roots))
+            )
+
+        if (
+            target_revision is not None
+            and not downgrade_revisions
+            and target_revision not in heads
+        ):
+            # Empty intersection: target revs are not present.
+
+            raise RangeNotAncestorError("Nothing to drop", upper)
+
+        return downgrade_revisions, heads
+
+    def _collect_upgrade_revisions(
+        self,
+        upper: _RevisionIdentifierType,
+        lower: _RevisionIdentifierType,
+        inclusive: bool,
+        implicit_base: bool,
+        assert_relative_length: bool,
+    ) -> Tuple[Set[Revision], Tuple[Revision, ...]]:
+        """
+        Compute the set of required revisions specified by :upper, and the
+        current set of active revisions specified by :lower. Find the
+        difference between the two to compute the required upgrades.
+
+        :inclusive=True includes the current/lower revisions in the set
+
+        :implicit_base=False only returns revisions which are downstream
+        of the current/lower revisions. Dependencies from branches with
+        different bases will not be included.
+        """
+        targets: Collection[Revision] = [
+            is_revision(rev)
+            for rev in self._parse_upgrade_target(
+                current_revisions=lower,
+                target=upper,
+                assert_relative_length=assert_relative_length,
+            )
+        ]
+
+        # assert type(targets) is tuple, "targets should be a tuple"
+
+        # Handle named bases (e.g. branch@... -> heads should only produce
+        # targets on the given branch)
+        if isinstance(lower, str) and "@" in lower:
+            branch, _, _ = lower.partition("@")
+            branch_rev = self.get_revision(branch)
+            if branch_rev is not None and branch_rev.revision == branch:
+                # A revision was used as a label; get its branch instead
+                assert len(branch_rev.branch_labels) == 1
+                branch = next(iter(branch_rev.branch_labels))
+            targets = {
+                need for need in targets if branch in need.branch_labels
+            }
+
+        required_node_set = set(
+            self._get_ancestor_nodes(
+                targets, check=True, include_dependencies=True
+            )
+        ).union(targets)
+
+        current_revisions = self.get_revisions(lower)
+        if not implicit_base and any(
+            rev not in required_node_set
+            for rev in current_revisions
+            if rev is not None
+        ):
+            raise RangeNotAncestorError(lower, upper)
+        assert (
+            type(current_revisions) is tuple
+        ), "current_revisions should be a tuple"
+
+        # Special case where lower = a relative value (get_revisions can't
+        # find it)
+        if current_revisions and current_revisions[0] is None:
+            _, rev = self._parse_downgrade_target(
+                current_revisions=upper,
+                target=lower,
+                assert_relative_length=assert_relative_length,
+            )
+            assert rev
+            if rev == "base":
+                current_revisions = tuple()
+                lower = None
+            else:
+                current_revisions = (rev,)
+                lower = rev.revision
+
+        current_node_set = set(
+            self._get_ancestor_nodes(
+                current_revisions, check=True, include_dependencies=True
+            )
+        ).union(current_revisions)
+
+        needs = required_node_set.difference(current_node_set)
+
+        # Include the lower revision (i.e. current_revisions) in the iteration
+        if inclusive:
+            needs.update(is_revision(rev) for rev in self.get_revisions(lower))
+        # By default, base is implicit as we want all dependencies returned.
+        # Base is also implicit if lower = base
+        # implicit_base=False -> only return direct downstreams of
+        # current_revisions
+        if current_revisions and not implicit_base:
+            lower_descendants = self._get_descendant_nodes(
+                [is_revision(rev) for rev in current_revisions],
+                check=True,
+                include_dependencies=False,
+            )
+            needs.intersection_update(lower_descendants)
+
+        return needs, tuple(targets)
+
+    def _get_all_current(
+        self, id_: Tuple[str, ...]
+    ) -> Set[Optional[_RevisionOrBase]]:
+        top_revs: Set[Optional[_RevisionOrBase]]
+        top_revs = set(self.get_revisions(id_))
+        top_revs.update(
+            self._get_ancestor_nodes(list(top_revs), include_dependencies=True)
+        )
+        return self._filter_into_branch_heads(top_revs)
+
+
+class Revision:
+    """Base class for revisioned objects.
+
+    The :class:`.Revision` class is the base of the more public-facing
+    :class:`.Script` object, which represents a migration script.
+    The mechanics of revision management and traversal are encapsulated
+    within :class:`.Revision`, while :class:`.Script` applies this logic
+    to Python files in a version directory.
+
+    """
+
+    nextrev: FrozenSet[str] = frozenset()
+    """following revisions, based on down_revision only."""
+
+    _all_nextrev: FrozenSet[str] = frozenset()
+
+    revision: str = None  # type: ignore[assignment]
+    """The string revision number."""
+
+    down_revision: Optional[_RevIdType] = None
+    """The ``down_revision`` identifier(s) within the migration script.
+
+    Note that the total set of "down" revisions is
+    down_revision + dependencies.
+
+    """
+
+    dependencies: Optional[_RevIdType] = None
+    """Additional revisions which this revision is dependent on.
+
+    From a migration standpoint, these dependencies are added to the
+    down_revision to form the full iteration.  However, the separation
+    of down_revision from "dependencies" is to assist in navigating
+    a history that contains many branches, typically a multi-root scenario.
+
+    """
+
+    branch_labels: Set[str] = None  # type: ignore[assignment]
+    """Optional string/tuple of symbolic names to apply to this
+    revision's branch"""
+
+    _resolved_dependencies: Tuple[str, ...]
+    _normalized_resolved_dependencies: Tuple[str, ...]
+
+    @classmethod
+    def verify_rev_id(cls, revision: str) -> None:
+        illegal_chars = set(revision).intersection(_revision_illegal_chars)
+        if illegal_chars:
+            raise RevisionError(
+                "Character(s) '%s' not allowed in revision identifier '%s'"
+                % (", ".join(sorted(illegal_chars)), revision)
+            )
+
+    def __init__(
+        self,
+        revision: str,
+        down_revision: Optional[Union[str, Tuple[str, ...]]],
+        dependencies: Optional[Union[str, Tuple[str, ...]]] = None,
+        branch_labels: Optional[Union[str, Tuple[str, ...]]] = None,
+    ) -> None:
+        if down_revision and revision in util.to_tuple(down_revision):
+            raise LoopDetected(revision)
+        elif dependencies is not None and revision in util.to_tuple(
+            dependencies
+        ):
+            raise DependencyLoopDetected(revision)
+
+        self.verify_rev_id(revision)
+        self.revision = revision
+        self.down_revision = tuple_rev_as_scalar(util.to_tuple(down_revision))
+        self.dependencies = tuple_rev_as_scalar(util.to_tuple(dependencies))
+        self._orig_branch_labels = util.to_tuple(branch_labels, default=())
+        self.branch_labels = set(self._orig_branch_labels)
+
+    def __repr__(self) -> str:
+        args = [repr(self.revision), repr(self.down_revision)]
+        if self.dependencies:
+            args.append("dependencies=%r" % (self.dependencies,))
+        if self.branch_labels:
+            args.append("branch_labels=%r" % (self.branch_labels,))
+        return "%s(%s)" % (self.__class__.__name__, ", ".join(args))
+
+    def add_nextrev(self, revision: Revision) -> None:
+        self._all_nextrev = self._all_nextrev.union([revision.revision])
+        if self.revision in revision._versioned_down_revisions:
+            self.nextrev = self.nextrev.union([revision.revision])
+
+    @property
+    def _all_down_revisions(self) -> Tuple[str, ...]:
+        return util.dedupe_tuple(
+            util.to_tuple(self.down_revision, default=())
+            + self._resolved_dependencies
+        )
+
+    @property
+    def _normalized_down_revisions(self) -> Tuple[str, ...]:
+        """return immediate down revisions for a rev, omitting dependencies
+        that are still dependencies of ancestors.
+
+        """
+        return util.dedupe_tuple(
+            util.to_tuple(self.down_revision, default=())
+            + self._normalized_resolved_dependencies
+        )
+
+    @property
+    def _versioned_down_revisions(self) -> Tuple[str, ...]:
+        return util.to_tuple(self.down_revision, default=())
+
+    @property
+    def is_head(self) -> bool:
+        """Return True if this :class:`.Revision` is a 'head' revision.
+
+        This is determined based on whether any other :class:`.Script`
+        within the :class:`.ScriptDirectory` refers to this
+        :class:`.Script`.   Multiple heads can be present.
+
+        """
+        return not bool(self.nextrev)
+
+    @property
+    def _is_real_head(self) -> bool:
+        return not bool(self._all_nextrev)
+
+    @property
+    def is_base(self) -> bool:
+        """Return True if this :class:`.Revision` is a 'base' revision."""
+
+        return self.down_revision is None
+
+    @property
+    def _is_real_base(self) -> bool:
+        """Return True if this :class:`.Revision` is a "real" base revision,
+        e.g. that it has no dependencies either."""
+
+        # we use self.dependencies here because this is called up
+        # in initialization where _real_dependencies isn't set up
+        # yet
+        return self.down_revision is None and self.dependencies is None
+
+    @property
+    def is_branch_point(self) -> bool:
+        """Return True if this :class:`.Script` is a branch point.
+
+        A branch point is defined as a :class:`.Script` which is referred
+        to by more than one succeeding :class:`.Script`; that is, more
+        than one :class:`.Script` has a ``down_revision`` identifier pointing
+        here.
+
+        """
+        return len(self.nextrev) > 1
+
+    @property
+    def _is_real_branch_point(self) -> bool:
+        """Return True if this :class:`.Script` is a 'real' branch point,
+        taking into account dependencies as well.
+
+        """
+        return len(self._all_nextrev) > 1
+
+    @property
+    def is_merge_point(self) -> bool:
+        """Return True if this :class:`.Script` is a merge point."""
+
+        return len(self._versioned_down_revisions) > 1
+
+
+@overload
+def tuple_rev_as_scalar(rev: None) -> None:
+    ...
+
+
+@overload
+def tuple_rev_as_scalar(
+    rev: Union[Tuple[_T, ...], List[_T]]
+) -> Union[_T, Tuple[_T, ...], List[_T]]:
+    ...
+
+
+def tuple_rev_as_scalar(
+    rev: Optional[Sequence[_T]],
+) -> Union[_T, Sequence[_T], None]:
+    if not rev:
+        return None
+    elif len(rev) == 1:
+        return rev[0]
+    else:
+        return rev
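+
+# Illustrative behavior of tuple_rev_as_scalar (a sketch):
+#   tuple_rev_as_scalar(None) -> None
+#   tuple_rev_as_scalar(("abc",)) -> "abc"
+#   tuple_rev_as_scalar(("abc", "def")) -> ("abc", "def")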
+
+
+def is_revision(rev: Any) -> Revision:
+    assert isinstance(rev, Revision)
+    return rev
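+
+
+# A hedged sketch of how Revision objects chain together; the revision ids
+# below are hypothetical:
+#
+#   base = Revision("a1", None)                     # base: no down_revision
+#   step = Revision("b2", "a1")                     # b2 follows a1
+#   dep = Revision("c3", "b2", dependencies="a1")   # c3 also depends on a1
+#
+# Revision("d4", "d4") raises LoopDetected, since a revision may not list
+# itself as its own down_revision.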
diff --git a/venv/Lib/site-packages/alembic/script/write_hooks.py b/venv/Lib/site-packages/alembic/script/write_hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..9977147921055b2f5540993188ca495455a2ca7d
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/script/write_hooks.py
@@ -0,0 +1,179 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import shlex
+import subprocess
+import sys
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import List
+from typing import Mapping
+from typing import Optional
+from typing import Union
+
+from .. import util
+from ..util import compat
+
+
+REVISION_SCRIPT_TOKEN = "REVISION_SCRIPT_FILENAME"
+
+_registry: dict = {}
+
+
+def register(name: str) -> Callable:
+    """A function decorator that will register that function as a write hook.
+
+    See the documentation linked below for an example.
+
+    .. seealso::
+
+        :ref:`post_write_hooks_custom`
+
+
+    """
+
+    def decorate(fn):
+        _registry[name] = fn
+        return fn
+
+    return decorate
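+
+# A minimal sketch of defining a custom hook via @register; the hook name
+# and body here are hypothetical:
+#
+#   @register("spellcheck")
+#   def spellcheck(path, options):
+#       # ``path`` is the generated revision script's filename; ``options``
+#       # carries the ``spellcheck.*`` keys from the .ini configuration.
+#       ...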
+
+
+def _invoke(
+    name: str, revision: str, options: Mapping[str, Union[str, int]]
+) -> Any:
+    """Invokes the formatter registered for the given name.
+
+    :param name: The name of a formatter in the registry
+    :param revision: string path to the revision script
+    :param options: A dict containing kwargs passed to the
+        specified formatter.
+    :raises: :class:`alembic.util.CommandError`
+    """
+    try:
+        hook = _registry[name]
+    except KeyError as ke:
+        raise util.CommandError(
+            f"No formatter with name '{name}' registered"
+        ) from ke
+    else:
+        return hook(revision, options)
+
+
+def _run_hooks(path: str, hook_config: Mapping[str, str]) -> None:
+    """Invoke hooks for a generated revision."""
+
+    from .base import _split_on_space_comma
+
+    names = _split_on_space_comma.split(hook_config.get("hooks", ""))
+
+    for name in names:
+        if not name:
+            continue
+        opts = {
+            key[len(name) + 1 :]: hook_config[key]
+            for key in hook_config
+            if key.startswith(name + ".")
+        }
+        opts["_hook_name"] = name
+        try:
+            type_ = opts["type"]
+        except KeyError as ke:
+            raise util.CommandError(
+                f"Key {name}.type is required for post write hook {name!r}"
+            ) from ke
+        else:
+            with util.status(
+                f"Running post write hook {name!r}", newline=True
+            ):
+                _invoke(type_, path, opts)
+
+
+def _parse_cmdline_options(cmdline_options_str: str, path: str) -> List[str]:
+    """Parse options from a string into a list.
+
+    Also substitutes the revision script token with the actual filename of
+    the revision script.
+
+    If the revision script token doesn't occur in the options string, it is
+    automatically prepended.
+    """
+    if REVISION_SCRIPT_TOKEN not in cmdline_options_str:
+        cmdline_options_str = REVISION_SCRIPT_TOKEN + " " + cmdline_options_str
+    cmdline_options_list = shlex.split(
+        cmdline_options_str, posix=compat.is_posix
+    )
+    cmdline_options_list = [
+        option.replace(REVISION_SCRIPT_TOKEN, path)
+        for option in cmdline_options_list
+    ]
+    return cmdline_options_list
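+
+# Example substitution (a sketch; "abc123_rev.py" is a hypothetical path):
+#   _parse_cmdline_options("-l 79 REVISION_SCRIPT_FILENAME", "abc123_rev.py")
+#       -> ["-l", "79", "abc123_rev.py"]
+#   _parse_cmdline_options("-l 79", "abc123_rev.py")
+#       -> ["abc123_rev.py", "-l", "79"]  # token prepended when absent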
+
+
+@register("console_scripts")
+def console_scripts(
+    path: str, options: dict, ignore_output: bool = False
+) -> None:
+    try:
+        entrypoint_name = options["entrypoint"]
+    except KeyError as ke:
+        raise util.CommandError(
+            f"Key {options['_hook_name']}.entrypoint is required for post "
+            f"write hook {options['_hook_name']!r}"
+        ) from ke
+    for entry in compat.importlib_metadata_get("console_scripts"):
+        if entry.name == entrypoint_name:
+            impl: Any = entry
+            break
+    else:
+        raise util.CommandError(
+            f"Could not find entrypoint console_scripts.{entrypoint_name}"
+        )
+    cwd: Optional[str] = options.get("cwd", None)
+    cmdline_options_str = options.get("options", "")
+    cmdline_options_list = _parse_cmdline_options(cmdline_options_str, path)
+
+    kw: Dict[str, Any] = {}
+    if ignore_output:
+        kw["stdout"] = kw["stderr"] = subprocess.DEVNULL
+
+    subprocess.run(
+        [
+            sys.executable,
+            "-c",
+            f"import {impl.module}; {impl.module}.{impl.attr}()",
+        ]
+        + cmdline_options_list,
+        cwd=cwd,
+        **kw,
+    )
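+
+    # With the "black" hook shown in the template .ini comments, the call
+    # above amounts to roughly (a sketch; the real entrypoint attribute is
+    # whatever black's console_scripts entry declares):
+    #   <python> -c "import black; black.<attr>()" -l 79 <revision script>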
+
+
+@register("exec")
+def exec_(path: str, options: dict, ignore_output: bool = False) -> None:
+    try:
+        executable = options["executable"]
+    except KeyError as ke:
+        raise util.CommandError(
+            f"Key {options['_hook_name']}.executable is required for post "
+            f"write hook {options['_hook_name']!r}"
+        ) from ke
+    cwd: Optional[str] = options.get("cwd", None)
+    cmdline_options_str = options.get("options", "")
+    cmdline_options_list = _parse_cmdline_options(cmdline_options_str, path)
+
+    kw: Dict[str, Any] = {}
+    if ignore_output:
+        kw["stdout"] = kw["stderr"] = subprocess.DEVNULL
+
+    subprocess.run(
+        [
+            executable,
+            *cmdline_options_list,
+        ],
+        cwd=cwd,
+        **kw,
+    )
diff --git a/venv/Lib/site-packages/alembic/templates/async/README b/venv/Lib/site-packages/alembic/templates/async/README
new file mode 100644
index 0000000000000000000000000000000000000000..e0d0858f266ec27b243e8b92301fc7002e1f2745
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/async/README
@@ -0,0 +1 @@
+Generic single-database configuration with an async dbapi.
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic/templates/async/__pycache__/env.cpython-311.pyc b/venv/Lib/site-packages/alembic/templates/async/__pycache__/env.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef58e510aa124a47b854e3fa45ee3762b70b3e7a
Binary files /dev/null and b/venv/Lib/site-packages/alembic/templates/async/__pycache__/env.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/templates/async/alembic.ini.mako b/venv/Lib/site-packages/alembic/templates/async/alembic.ini.mako
new file mode 100644
index 0000000000000000000000000000000000000000..0e5f43fdefbf43aab57005b532381e50abcca2fc
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/async/alembic.ini.mako
@@ -0,0 +1,114 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+script_location = ${script_location}
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires python>=3.9 or the backports.zoneinfo library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the
+# "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to ${script_location}/versions.  When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+version_path_separator = os  # Use os.pathsep. Default configuration used for new projects.
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts.  See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/venv/Lib/site-packages/alembic/templates/async/env.py b/venv/Lib/site-packages/alembic/templates/async/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f2d51940080a1a7c954a8916dab86f00d5a4aa5
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/async/env.py
@@ -0,0 +1,89 @@
+import asyncio
+from logging.config import fileConfig
+
+from sqlalchemy import pool
+from sqlalchemy.engine import Connection
+from sqlalchemy.ext.asyncio import async_engine_from_config
+
+from alembic import context
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line essentially sets up loggers.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = None
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def do_run_migrations(connection: Connection) -> None:
+    context.configure(connection=connection, target_metadata=target_metadata)
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+async def run_async_migrations() -> None:
+    """In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    connectable = async_engine_from_config(
+        config.get_section(config.config_ini_section, {}),
+        prefix="sqlalchemy.",
+        poolclass=pool.NullPool,
+    )
+
+    async with connectable.connect() as connection:
+        await connection.run_sync(do_run_migrations)
+
+    await connectable.dispose()
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode."""
+
+    asyncio.run(run_async_migrations())
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/venv/Lib/site-packages/alembic/templates/async/script.py.mako b/venv/Lib/site-packages/alembic/templates/async/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..fbc4b07dcef98b20c6f96b642097f35e8433258e
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/async/script.py.mako
@@ -0,0 +1,26 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+    ${downgrades if downgrades else "pass"}
diff --git a/venv/Lib/site-packages/alembic/templates/generic/README b/venv/Lib/site-packages/alembic/templates/generic/README
new file mode 100644
index 0000000000000000000000000000000000000000..98e4f9c44effe479ed38c66ba922e7bcc672916f
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/generic/README
@@ -0,0 +1 @@
+Generic single-database configuration.
\ No newline at end of file
diff --git a/venv/Lib/site-packages/alembic/templates/generic/__pycache__/env.cpython-311.pyc b/venv/Lib/site-packages/alembic/templates/generic/__pycache__/env.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1e79165b4b670d80b5fbd7ebf1ffe57bd82df62
Binary files /dev/null and b/venv/Lib/site-packages/alembic/templates/generic/__pycache__/env.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/templates/generic/alembic.ini.mako b/venv/Lib/site-packages/alembic/templates/generic/alembic.ini.mako
new file mode 100644
index 0000000000000000000000000000000000000000..29245dd3f5aef640935e62bb9dc305f0130c465b
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/generic/alembic.ini.mako
@@ -0,0 +1,116 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+script_location = ${script_location}
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+# for all available tokens
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires python>=3.9 or the backports.zoneinfo library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the
+# "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to ${script_location}/versions.  When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+version_path_separator = os  # Use os.pathsep. Default configuration used for new projects.
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts.  See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/venv/Lib/site-packages/alembic/templates/generic/env.py b/venv/Lib/site-packages/alembic/templates/generic/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..36112a3c68590d6a8e07fea0ce70a5afb38c951a
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/generic/env.py
@@ -0,0 +1,78 @@
+from logging.config import fileConfig
+
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+
+from alembic import context
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line essentially sets up loggers.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = None
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+    connectable = engine_from_config(
+        config.get_section(config.config_ini_section, {}),
+        prefix="sqlalchemy.",
+        poolclass=pool.NullPool,
+    )
+
+    with connectable.connect() as connection:
+        context.configure(
+            connection=connection, target_metadata=target_metadata
+        )
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/venv/Lib/site-packages/alembic/templates/generic/script.py.mako b/venv/Lib/site-packages/alembic/templates/generic/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..fbc4b07dcef98b20c6f96b642097f35e8433258e
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/generic/script.py.mako
@@ -0,0 +1,26 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+    ${downgrades if downgrades else "pass"}
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/README b/venv/Lib/site-packages/alembic/templates/multidb/README
new file mode 100644
index 0000000000000000000000000000000000000000..f046ec91427e2f4edd53dcb5409b21955c8fd0af
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/multidb/README
@@ -0,0 +1,12 @@
+Rudimentary multi-database configuration.
+
+Multi-DB isn't vastly different from generic. The primary difference is that it
+will run the migrations N times (depending on how many databases you have
+configured), providing one engine name and associated context for each run.
+
+That engine name will then allow the migration to restrict what runs within it to
+just the appropriate migrations for that engine. You can see this behavior within
+the mako template.
+
+In the provided configuration, you'll need `databases` set in alembic's
+config, and a `sqlalchemy.url` for each engine name.
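+
+For example (a sketch; the engine names are arbitrary):
+
+    databases = engine1, engine2
+
+    [engine1]
+    sqlalchemy.url = driver://user:pass@localhost/dbname
+
+    [engine2]
+    sqlalchemy.url = driver://user:pass@localhost/dbname2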
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/__pycache__/env.cpython-311.pyc b/venv/Lib/site-packages/alembic/templates/multidb/__pycache__/env.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1db18d1db8af3f1b8beea27330c5efe6fd424fd7
Binary files /dev/null and b/venv/Lib/site-packages/alembic/templates/multidb/__pycache__/env.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/alembic.ini.mako b/venv/Lib/site-packages/alembic/templates/multidb/alembic.ini.mako
new file mode 100644
index 0000000000000000000000000000000000000000..c7fbe48228a4c0af4f121bf8cc984ea7f889af7c
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/multidb/alembic.ini.mako
@@ -0,0 +1,121 @@
+# A multi-database configuration.
+
+[alembic]
+# path to migration scripts
+script_location = ${script_location}
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+# for all available tokens
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires python>=3.9 or the backports.zoneinfo library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the
+# "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to ${script_location}/versions.  When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:${script_location}/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+version_path_separator = os  # Use os.pathsep. Default configuration used for new projects.
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+databases = engine1, engine2
+
+[engine1]
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+[engine2]
+sqlalchemy.url = driver://user:pass@localhost/dbname2
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts.  See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/env.py b/venv/Lib/site-packages/alembic/templates/multidb/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..e937b64eeed2fd980c214ab87505d75041b581ad
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/multidb/env.py
@@ -0,0 +1,140 @@
+import logging
+from logging.config import fileConfig
+import re
+
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+
+from alembic import context
+
+USE_TWOPHASE = False
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line essentially sets up loggers.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+logger = logging.getLogger("alembic.env")
+
+# gather section names referring to different
+# databases.  These are named "engine1", "engine2"
+# in the sample .ini file.
+db_names = config.get_main_option("databases", "")
+
+# add your model's MetaData objects here
+# for 'autogenerate' support.  These must be set
+# up to hold just those tables targeting a
+# particular database. table.tometadata() may be
+# helpful here in case a "copy" of
+# a MetaData is needed.
+# from myapp import mymodel
+# target_metadata = {
+#       'engine1':mymodel.metadata1,
+#       'engine2':mymodel.metadata2
+# }
+target_metadata = {}
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    # for the --sql use case, run migrations for each URL into
+    # individual files.
+
+    engines = {}
+    for name in re.split(r",\s*", db_names):
+        engines[name] = rec = {}
+        rec["url"] = context.config.get_section_option(name, "sqlalchemy.url")
+
+    for name, rec in engines.items():
+        logger.info("Migrating database %s" % name)
+        file_ = "%s.sql" % name
+        logger.info("Writing output to %s" % file_)
+        with open(file_, "w") as buffer:
+            context.configure(
+                url=rec["url"],
+                output_buffer=buffer,
+                target_metadata=target_metadata.get(name),
+                literal_binds=True,
+                dialect_opts={"paramstyle": "named"},
+            )
+            with context.begin_transaction():
+                context.run_migrations(engine_name=name)
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    # for the direct-to-DB use case, start a transaction on all
+    # engines, then run all migrations, then commit all transactions.
+
+    engines = {}
+    for name in re.split(r",\s*", db_names):
+        engines[name] = rec = {}
+        rec["engine"] = engine_from_config(
+            context.config.get_section(name, {}),
+            prefix="sqlalchemy.",
+            poolclass=pool.NullPool,
+        )
+
+    for name, rec in engines.items():
+        engine = rec["engine"]
+        rec["connection"] = conn = engine.connect()
+
+        if USE_TWOPHASE:
+            rec["transaction"] = conn.begin_twophase()
+        else:
+            rec["transaction"] = conn.begin()
+
+    try:
+        for name, rec in engines.items():
+            logger.info("Migrating database %s" % name)
+            context.configure(
+                connection=rec["connection"],
+                upgrade_token="%s_upgrades" % name,
+                downgrade_token="%s_downgrades" % name,
+                target_metadata=target_metadata.get(name),
+            )
+            context.run_migrations(engine_name=name)
+
+        if USE_TWOPHASE:
+            for rec in engines.values():
+                rec["transaction"].prepare()
+
+        for rec in engines.values():
+            rec["transaction"].commit()
+    except:
+        for rec in engines.values():
+            rec["transaction"].rollback()
+        raise
+    finally:
+        for rec in engines.values():
+            rec["connection"].close()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/venv/Lib/site-packages/alembic/templates/multidb/script.py.mako b/venv/Lib/site-packages/alembic/templates/multidb/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..6108b8a0dc2a1bef8f8b25fda188ab61ef9bdaf9
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/templates/multidb/script.py.mako
@@ -0,0 +1,47 @@
+<%!
+import re
+
+%>"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade(engine_name: str) -> None:
+    globals()["upgrade_%s" % engine_name]()
+
+
+def downgrade(engine_name: str) -> None:
+    globals()["downgrade_%s" % engine_name]()
+
+<%
+    db_names = config.get_main_option("databases")
+%>
+
+## generate an "upgrade_<xyz>() / downgrade_<xyz>()" function
+## for each database name in the ini file.
+
+% for db_name in re.split(r',\s*', db_names):
+
+def upgrade_${db_name}() -> None:
+    ${context.get("%s_upgrades" % db_name, "pass")}
+
+
+def downgrade_${db_name}() -> None:
+    ${context.get("%s_downgrades" % db_name, "pass")}
+
+% endfor
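+
+## For example, with ``databases = engine1, engine2`` in the .ini file this
+## loop renders upgrade_engine1()/downgrade_engine1() and
+## upgrade_engine2()/downgrade_engine2() (illustrative engine names).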
diff --git a/venv/Lib/site-packages/alembic/testing/__init__.py b/venv/Lib/site-packages/alembic/testing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0407adfe9ceb5ed7071d8fac48813e664468a71b
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/__init__.py
@@ -0,0 +1,29 @@
+from sqlalchemy.testing import config
+from sqlalchemy.testing import emits_warning
+from sqlalchemy.testing import engines
+from sqlalchemy.testing import exclusions
+from sqlalchemy.testing import mock
+from sqlalchemy.testing import provide_metadata
+from sqlalchemy.testing import skip_if
+from sqlalchemy.testing import uses_deprecated
+from sqlalchemy.testing.config import combinations
+from sqlalchemy.testing.config import fixture
+from sqlalchemy.testing.config import requirements as requires
+
+from .assertions import assert_raises
+from .assertions import assert_raises_message
+from .assertions import emits_python_deprecation_warning
+from .assertions import eq_
+from .assertions import eq_ignore_whitespace
+from .assertions import expect_raises
+from .assertions import expect_raises_message
+from .assertions import expect_sqlalchemy_deprecated
+from .assertions import expect_sqlalchemy_deprecated_20
+from .assertions import expect_warnings
+from .assertions import is_
+from .assertions import is_false
+from .assertions import is_not_
+from .assertions import is_true
+from .assertions import ne_
+from .fixtures import TestBase
+from .util import resolve_lambda
diff --git a/venv/Lib/site-packages/alembic/testing/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f6ce1e4bc43b4250ffa19fafffc3995d002dd9c
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/__pycache__/assertions.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/__pycache__/assertions.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f35647670b66d45c64e60c6992d11c542b32c74
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/__pycache__/assertions.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/__pycache__/env.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/__pycache__/env.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..46d073cceed168f81123b04f5a4593866b29b5ac
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/__pycache__/env.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/__pycache__/fixtures.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/__pycache__/fixtures.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..def97b3d857213c04d5a42c50ce6669351a33598
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/__pycache__/fixtures.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/__pycache__/requirements.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/__pycache__/requirements.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d82bf8f8b9af5c4e8d82d3bf2cc82b4190150c5
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/__pycache__/requirements.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/__pycache__/schemacompare.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/__pycache__/schemacompare.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02c2a47479f86f5ccf4287591ed32ffc9e1db3c6
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/__pycache__/schemacompare.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/__pycache__/util.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/__pycache__/util.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3137858b2445afd9a72f95912853586b0f4a1afa
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/__pycache__/util.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/__pycache__/warnings.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/__pycache__/warnings.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c0e9f82e68fe5a66fbe0f1be488bb621c0897fb
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/__pycache__/warnings.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/assertions.py b/venv/Lib/site-packages/alembic/testing/assertions.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec9593b713656d7c9a4097c32b5d84b0b570069f
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/assertions.py
@@ -0,0 +1,167 @@
+from __future__ import annotations
+
+import contextlib
+import re
+import sys
+from typing import Any
+from typing import Dict
+
+from sqlalchemy import exc as sa_exc
+from sqlalchemy.engine import default
+from sqlalchemy.testing.assertions import _expect_warnings
+from sqlalchemy.testing.assertions import eq_  # noqa
+from sqlalchemy.testing.assertions import is_  # noqa
+from sqlalchemy.testing.assertions import is_false  # noqa
+from sqlalchemy.testing.assertions import is_not_  # noqa
+from sqlalchemy.testing.assertions import is_true  # noqa
+from sqlalchemy.testing.assertions import ne_  # noqa
+from sqlalchemy.util import decorator
+
+from ..util import sqla_compat
+
+
+def _assert_proper_exception_context(exception):
+    """assert that any exception we're catching does not have a __context__
+    without a __cause__, and that __suppress_context__ is never set.
+
+    Python 3 will report nested exceptions as "during the handling of
+    error X, error Y occurred". That's not what we want to do; we want
+    these exceptions in a cause chain.
+
+    """
+
+    if (
+        exception.__context__ is not exception.__cause__
+        and not exception.__suppress_context__
+    ):
+        assert False, (
+            "Exception %r was correctly raised but did not set a cause, "
+            "within context %r as its cause."
+            % (exception, exception.__context__)
+        )
+
+
+def assert_raises(except_cls, callable_, *args, **kw):
+    return _assert_raises(except_cls, callable_, args, kw, check_context=True)
+
+
+def assert_raises_context_ok(except_cls, callable_, *args, **kw):
+    return _assert_raises(except_cls, callable_, args, kw)
+
+
+def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
+    return _assert_raises(
+        except_cls, callable_, args, kwargs, msg=msg, check_context=True
+    )
+
+
+def assert_raises_message_context_ok(
+    except_cls, msg, callable_, *args, **kwargs
+):
+    return _assert_raises(except_cls, callable_, args, kwargs, msg=msg)
+
+
+def _assert_raises(
+    except_cls, callable_, args, kwargs, msg=None, check_context=False
+):
+    with _expect_raises(except_cls, msg, check_context) as ec:
+        callable_(*args, **kwargs)
+    return ec.error
+
+
+class _ErrorContainer:
+    error: Any = None
+
+
+@contextlib.contextmanager
+def _expect_raises(except_cls, msg=None, check_context=False):
+    ec = _ErrorContainer()
+    if check_context:
+        are_we_already_in_a_traceback = sys.exc_info()[0]
+    try:
+        yield ec
+        success = False
+    except except_cls as err:
+        ec.error = err
+        success = True
+        if msg is not None:
+            assert re.search(msg, str(err), re.UNICODE), f"{msg} !~ {err}"
+        if check_context and not are_we_already_in_a_traceback:
+            _assert_proper_exception_context(err)
+        print(str(err).encode("utf-8"))
+
+    # assert outside the block so it works for AssertionError too!
+    assert success, "Callable did not raise an exception"
+
+
+def expect_raises(except_cls, check_context=True):
+    return _expect_raises(except_cls, check_context=check_context)
+
+
+def expect_raises_message(except_cls, msg, check_context=True):
+    return _expect_raises(except_cls, msg=msg, check_context=check_context)
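+
+# Typical usage of expect_raises_message (a sketch; the exception type and
+# pattern are illustrative):
+#   with expect_raises_message(ValueError, "invalid literal"):
+#       int("not a number")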
+
+
+def eq_ignore_whitespace(a, b, msg=None):
+    a = re.sub(r"^\s+?|\n", "", a)
+    a = re.sub(r" {2,}", " ", a)
+    b = re.sub(r"^\s+?|\n", "", b)
+    b = re.sub(r" {2,}", " ", b)
+
+    assert a == b, msg or "%r != %r" % (a, b)
+
+
+_dialect_mods: Dict[Any, Any] = {}
+
+
+def _get_dialect(name):
+    if name is None or name == "default":
+        return default.DefaultDialect()
+    else:
+        d = sqla_compat._create_url(name).get_dialect()()
+
+        if name == "postgresql":
+            d.implicit_returning = True
+        elif name == "mssql":
+            d.legacy_schema_aliasing = False
+        return d
+
+
+def expect_warnings(*messages, **kw):
+    """Context manager which expects one or more warnings.
+
+    With no arguments, squelches all SAWarnings emitted via
+    sqlalchemy.util.warn and sqlalchemy.util.warn_limited.   Otherwise
+    pass string expressions that will match selected warnings via regex;
+    all non-matching warnings are sent through.
+
+    The expect version **asserts** that the warnings were in fact seen.
+
+    Note that the test suite sets SAWarning warnings to raise exceptions.
+
+    """
+    return _expect_warnings(Warning, messages, **kw)
+
+
+def emits_python_deprecation_warning(*messages):
+    """Decorator form of expect_warnings().
+
+    Note that emits_warning does **not** assert that the warnings
+    were in fact seen.
+
+    """
+
+    @decorator
+    def decorate(fn, *args, **kw):
+        with _expect_warnings(DeprecationWarning, assert_=False, *messages):
+            return fn(*args, **kw)
+
+    return decorate
+
+
+def expect_sqlalchemy_deprecated(*messages, **kw):
+    return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)
+
+
+def expect_sqlalchemy_deprecated_20(*messages, **kw):
+    return _expect_warnings(sa_exc.RemovedIn20Warning, messages, **kw)
diff --git a/venv/Lib/site-packages/alembic/testing/env.py b/venv/Lib/site-packages/alembic/testing/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..5df7ef8227dd89ac97aaf5115e9d85339223a14d
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/env.py
@@ -0,0 +1,518 @@
+import importlib.machinery
+import os
+import shutil
+import textwrap
+
+from sqlalchemy.testing import config
+from sqlalchemy.testing import provision
+
+from . import util as testing_util
+from .. import command
+from .. import script
+from .. import util
+from ..script import Script
+from ..script import ScriptDirectory
+
+
+def _get_staging_directory():
+    if provision.FOLLOWER_IDENT:
+        return "scratch_%s" % provision.FOLLOWER_IDENT
+    else:
+        return "scratch"
+
+
+def staging_env(create=True, template="generic", sourceless=False):
+    cfg = _testing_config()
+    if create:
+        path = os.path.join(_get_staging_directory(), "scripts")
+        assert not os.path.exists(path), (
+            "staging directory %s already exists; poor cleanup?" % path
+        )
+
+        command.init(cfg, path, template=template)
+        if sourceless:
+            try:
+                # do an import so that a .pyc/.pyo is generated.
+                util.load_python_file(path, "env.py")
+            except AttributeError:
+                # we don't have the migration context set up yet,
+                # so running env.py throws this exception.
+                # theoretically we could use py_compile here to
+                # generate .pyc/.pyo without importing, but it's not
+                # really worth it.
+                pass
+            assert sourceless in (
+                "pep3147_envonly",
+                "simple",
+                "pep3147_everything",
+            ), sourceless
+            make_sourceless(
+                os.path.join(path, "env.py"),
+                "pep3147" if "pep3147" in sourceless else "simple",
+            )
+
+    sc = script.ScriptDirectory.from_config(cfg)
+    return sc
+
+
+def clear_staging_env():
+    from sqlalchemy.testing import engines
+
+    engines.testing_reaper.close_all()
+    shutil.rmtree(_get_staging_directory(), True)
+
+
+def script_file_fixture(txt):
+    dir_ = os.path.join(_get_staging_directory(), "scripts")
+    path = os.path.join(dir_, "script.py.mako")
+    with open(path, "w") as f:
+        f.write(txt)
+
+
+def env_file_fixture(txt):
+    dir_ = os.path.join(_get_staging_directory(), "scripts")
+    txt = (
+        """
+from alembic import context
+
+config = context.config
+"""
+        + txt
+    )
+
+    path = os.path.join(dir_, "env.py")
+    pyc_path = util.pyc_file_from_path(path)
+    if pyc_path:
+        os.unlink(pyc_path)
+
+    with open(path, "w") as f:
+        f.write(txt)
+
+
+def _sqlite_file_db(tempname="foo.db", future=False, scope=None, **options):
+    dir_ = os.path.join(_get_staging_directory(), "scripts")
+    url = "sqlite:///%s/%s" % (dir_, tempname)
+    if scope and util.sqla_14:
+        options["scope"] = scope
+    return testing_util.testing_engine(url=url, future=future, options=options)
+
+
+def _sqlite_testing_config(sourceless=False, future=False):
+    dir_ = os.path.join(_get_staging_directory(), "scripts")
+    url = "sqlite:///%s/foo.db" % dir_
+
+    sqlalchemy_future = future or ("future" in config.db.__class__.__module__)
+
+    return _write_config_file(
+        """
+[alembic]
+script_location = %s
+sqlalchemy.url = %s
+sourceless = %s
+%s
+
+[loggers]
+keys = root,sqlalchemy
+
+[handlers]
+keys = console
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = DEBUG
+handlers =
+qualname = sqlalchemy.engine
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatters]
+keys = generic
+
+[formatter_generic]
+format = %%(levelname)-5.5s [%%(name)s] %%(message)s
+datefmt = %%H:%%M:%%S
+    """
+        % (
+            dir_,
+            url,
+            "true" if sourceless else "false",
+            "sqlalchemy.future = true" if sqlalchemy_future else "",
+        )
+    )
+
+
+def _multi_dir_testing_config(sourceless=False, extra_version_location=""):
+    dir_ = os.path.join(_get_staging_directory(), "scripts")
+    sqlalchemy_future = "future" in config.db.__class__.__module__
+
+    url = "sqlite:///%s/foo.db" % dir_
+
+    return _write_config_file(
+        """
+[alembic]
+script_location = %s
+sqlalchemy.url = %s
+sqlalchemy.future = %s
+sourceless = %s
+version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s
+
+[loggers]
+keys = root
+
+[handlers]
+keys = console
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatters]
+keys = generic
+
+[formatter_generic]
+format = %%(levelname)-5.5s [%%(name)s] %%(message)s
+datefmt = %%H:%%M:%%S
+    """
+        % (
+            dir_,
+            url,
+            "true" if sqlalchemy_future else "false",
+            "true" if sourceless else "false",
+            extra_version_location,
+        )
+    )
+
+
+def _no_sql_testing_config(dialect="postgresql", directives=""):
+    """use a postgresql url with no host so that
+    connections are guaranteed to fail"""
+    dir_ = os.path.join(_get_staging_directory(), "scripts")
+    return _write_config_file(
+        """
+[alembic]
+script_location = %s
+sqlalchemy.url = %s://
+%s
+
+[loggers]
+keys = root
+
+[handlers]
+keys = console
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatters]
+keys = generic
+
+[formatter_generic]
+format = %%(levelname)-5.5s [%%(name)s] %%(message)s
+datefmt = %%H:%%M:%%S
+
+"""
+        % (dir_, dialect, directives)
+    )
+
+
+def _write_config_file(text):
+    cfg = _testing_config()
+    with open(cfg.config_file_name, "w") as f:
+        f.write(text)
+    return cfg
+
+
+def _testing_config():
+    from alembic.config import Config
+
+    if not os.access(_get_staging_directory(), os.F_OK):
+        os.mkdir(_get_staging_directory())
+    return Config(os.path.join(_get_staging_directory(), "test_alembic.ini"))
+
+
+def write_script(
+    scriptdir, rev_id, content, encoding="ascii", sourceless=False
+):
+    old = scriptdir.revision_map.get_revision(rev_id)
+    path = old.path
+
+    content = textwrap.dedent(content)
+    if encoding:
+        content = content.encode(encoding)
+    with open(path, "wb") as fp:
+        fp.write(content)
+    pyc_path = util.pyc_file_from_path(path)
+    if pyc_path:
+        os.unlink(pyc_path)
+    script = Script._from_path(scriptdir, path)
+    old = scriptdir.revision_map.get_revision(script.revision)
+    if old.down_revision != script.down_revision:
+        raise Exception(
+            "Can't change down_revision " "on a refresh operation."
+        )
+    scriptdir.revision_map.add_revision(script, _replace=True)
+
+    if sourceless:
+        make_sourceless(
+            path, "pep3147" if sourceless == "pep3147_everything" else "simple"
+        )
+
+
+def make_sourceless(path, style):
+    import py_compile
+
+    py_compile.compile(path)
+
+    if style == "simple":
+        pyc_path = util.pyc_file_from_path(path)
+        suffix = importlib.machinery.BYTECODE_SUFFIXES[0]
+        filepath, ext = os.path.splitext(path)
+        simple_pyc_path = filepath + suffix
+        shutil.move(pyc_path, simple_pyc_path)
+        pyc_path = simple_pyc_path
+    else:
+        assert style in ("pep3147", "simple")
+        pyc_path = util.pyc_file_from_path(path)
+
+    assert os.access(pyc_path, os.F_OK)
+
+    os.unlink(path)
+
+
+def three_rev_fixture(cfg):
+    a = util.rev_id()
+    b = util.rev_id()
+    c = util.rev_id()
+
+    script = ScriptDirectory.from_config(cfg)
+    script.generate_revision(a, "revision a", refresh=True, head="base")
+    write_script(
+        script,
+        a,
+        """\
+"Rev A"
+revision = '%s'
+down_revision = None
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 1")
+
+
+def downgrade():
+    op.execute("DROP STEP 1")
+
+"""
+        % a,
+    )
+
+    script.generate_revision(b, "revision b", refresh=True, head=a)
+    write_script(
+        script,
+        b,
+        f"""# coding: utf-8
+"Rev B, méil, %3"
+revision = '{b}'
+down_revision = '{a}'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 2")
+
+
+def downgrade():
+    op.execute("DROP STEP 2")
+
+""",
+        encoding="utf-8",
+    )
+
+    script.generate_revision(c, "revision c", refresh=True, head=b)
+    write_script(
+        script,
+        c,
+        """\
+"Rev C"
+revision = '%s'
+down_revision = '%s'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 3")
+
+
+def downgrade():
+    op.execute("DROP STEP 3")
+
+"""
+        % (c, b),
+    )
+    return a, b, c
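+
+# a usage sketch (illustrative only, not part of the original module): a
+# test builds a config, creates the linear a -> b -> c history, then runs
+# commands against it, e.g.
+#
+#     cfg = _sqlite_testing_config()
+#     a, b, c = three_rev_fixture(cfg)
+#     command.upgrade(cfg, c)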
+
+
+def multi_heads_fixture(cfg, a, b, c):
+    """Create a multiple head fixture from the three-revs fixture"""
+
+    # a->b->c
+    #     -> d -> e
+    #     -> f
+    d = util.rev_id()
+    e = util.rev_id()
+    f = util.rev_id()
+
+    script = ScriptDirectory.from_config(cfg)
+    script.generate_revision(
+        d, "revision d from b", head=b, splice=True, refresh=True
+    )
+    write_script(
+        script,
+        d,
+        """\
+"Rev D"
+revision = '%s'
+down_revision = '%s'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 4")
+
+
+def downgrade():
+    op.execute("DROP STEP 4")
+
+"""
+        % (d, b),
+    )
+
+    script.generate_revision(
+        e, "revision e from d", head=d, splice=True, refresh=True
+    )
+    write_script(
+        script,
+        e,
+        """\
+"Rev E"
+revision = '%s'
+down_revision = '%s'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 5")
+
+
+def downgrade():
+    op.execute("DROP STEP 5")
+
+"""
+        % (e, d),
+    )
+
+    script.generate_revision(
+        f, "revision f from b", head=b, splice=True, refresh=True
+    )
+    write_script(
+        script,
+        f,
+        """\
+"Rev F"
+revision = '%s'
+down_revision = '%s'
+
+from alembic import op
+
+
+def upgrade():
+    op.execute("CREATE STEP 6")
+
+
+def downgrade():
+    op.execute("DROP STEP 6")
+
+"""
+        % (f, b),
+    )
+
+    return d, e, f
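+
+# a usage sketch (illustrative only): after the splices the script directory
+# has three heads (c, e and f); a test can upgrade to all of them at once:
+#
+#     d, e, f = multi_heads_fixture(cfg, a, b, c)
+#     command.upgrade(cfg, "heads")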
+
+
+def _multidb_testing_config(engines):
+    """alembic.ini fixture to work exactly with the 'multidb' template"""
+
+    dir_ = os.path.join(_get_staging_directory(), "scripts")
+
+    sqlalchemy_future = "future" in config.db.__class__.__module__
+
+    databases = ", ".join(engines.keys())
+    engines = "\n\n".join(
+        "[%s]\n" "sqlalchemy.url = %s" % (key, value.url)
+        for key, value in engines.items()
+    )
+
+    return _write_config_file(
+        """
+[alembic]
+script_location = %s
+sourceless = false
+sqlalchemy.future = %s
+databases = %s
+
+%s
+[loggers]
+keys = root
+
+[handlers]
+keys = console
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatters]
+keys = generic
+
+[formatter_generic]
+format = %%(levelname)-5.5s [%%(name)s] %%(message)s
+datefmt = %%H:%%M:%%S
+    """
+        % (dir_, "true" if sqlalchemy_future else "false", databases, engines)
+    )
diff --git a/venv/Lib/site-packages/alembic/testing/fixtures.py b/venv/Lib/site-packages/alembic/testing/fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b83a745f3f5dbafbd0a0c073635122f888e797b
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/fixtures.py
@@ -0,0 +1,306 @@
+from __future__ import annotations
+
+import configparser
+from contextlib import contextmanager
+import io
+import re
+from typing import Any
+from typing import Dict
+
+from sqlalchemy import Column
+from sqlalchemy import inspect
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import testing
+from sqlalchemy import text
+from sqlalchemy.testing import config
+from sqlalchemy.testing import mock
+from sqlalchemy.testing.assertions import eq_
+from sqlalchemy.testing.fixtures import TablesTest as SQLAlchemyTablesTest
+from sqlalchemy.testing.fixtures import TestBase as SQLAlchemyTestBase
+
+import alembic
+from .assertions import _get_dialect
+from ..environment import EnvironmentContext
+from ..migration import MigrationContext
+from ..operations import Operations
+from ..util import sqla_compat
+from ..util.sqla_compat import create_mock_engine
+from ..util.sqla_compat import sqla_14
+from ..util.sqla_compat import sqla_2
+
+
+testing_config = configparser.ConfigParser()
+testing_config.read(["test.cfg"])
+
+
+class TestBase(SQLAlchemyTestBase):
+    is_sqlalchemy_future = sqla_2
+
+    @testing.fixture()
+    def ops_context(self, migration_context):
+        with migration_context.begin_transaction(_per_migration=True):
+            yield Operations(migration_context)
+
+    @testing.fixture
+    def migration_context(self, connection):
+        return MigrationContext.configure(
+            connection, opts=dict(transaction_per_migration=True)
+        )
+
+    @testing.fixture
+    def connection(self):
+        with config.db.connect() as conn:
+            yield conn
+
+
+class TablesTest(TestBase, SQLAlchemyTablesTest):
+    pass
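+
+
+# a usage sketch (illustrative only; `ExampleTest` is hypothetical): tests
+# written against TestBase receive the fixtures above by parameter name,
+# pytest-style, e.g.
+#
+#     class ExampleTest(TestBase):
+#         def test_something(self, ops_context):
+#             ops_context.execute("SELECT 1")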
+
+
+if sqla_14:
+    from sqlalchemy.testing.fixtures import FutureEngineMixin
+else:
+
+    class FutureEngineMixin:  # type:ignore[no-redef]
+        __requires__ = ("sqlalchemy_14",)
+
+
+FutureEngineMixin.is_sqlalchemy_future = True
+
+
+def capture_db(dialect="postgresql://"):
+    buf = []
+
+    def dump(sql, *multiparams, **params):
+        buf.append(str(sql.compile(dialect=engine.dialect)))
+
+    engine = create_mock_engine(dialect, dump)
+    return engine, buf
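+
+# a usage sketch (illustrative only; `my_metadata` is a hypothetical
+# MetaData): the mock engine compiles each statement instead of executing
+# it, appending the SQL string to the returned buffer:
+#
+#     engine, buf = capture_db()
+#     my_metadata.create_all(engine)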
+
+
+_engs: Dict[Any, Any] = {}
+
+
+@contextmanager
+def capture_context_buffer(**kw):
+    if kw.pop("bytes_io", False):
+        buf = io.BytesIO()
+    else:
+        buf = io.StringIO()
+
+    kw.update({"dialect_name": "sqlite", "output_buffer": buf})
+    conf = EnvironmentContext.configure
+
+    def configure(*arg, **opt):
+        opt.update(**kw)
+        return conf(*arg, **opt)
+
+    with mock.patch.object(EnvironmentContext, "configure", configure):
+        yield buf
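+
+# a usage sketch (illustrative only; `cfg` and `rev` are hypothetical): the
+# patched configure() sends offline-mode output to an in-memory buffer:
+#
+#     with capture_context_buffer() as buf:
+#         command.upgrade(cfg, rev, sql=True)
+#     assert "CREATE TABLE" in buf.getvalue()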
+
+
+@contextmanager
+def capture_engine_context_buffer(**kw):
+    from .env import _sqlite_file_db
+    from sqlalchemy import event
+
+    buf = io.StringIO()
+
+    eng = _sqlite_file_db()
+
+    conn = eng.connect()
+
+    @event.listens_for(conn, "before_cursor_execute")
+    def bce(conn, cursor, statement, parameters, context, executemany):
+        buf.write(statement + "\n")
+
+    kw.update({"connection": conn})
+    conf = EnvironmentContext.configure
+
+    def configure(*arg, **opt):
+        opt.update(**kw)
+        return conf(*arg, **opt)
+
+    with mock.patch.object(EnvironmentContext, "configure", configure):
+        yield buf
+
+
+def op_fixture(
+    dialect="default",
+    as_sql=False,
+    naming_convention=None,
+    literal_binds=False,
+    native_boolean=None,
+):
+    opts = {}
+    if naming_convention:
+        opts["target_metadata"] = MetaData(naming_convention=naming_convention)
+
+    class buffer_:
+        def __init__(self):
+            self.lines = []
+
+        def write(self, msg):
+            msg = msg.strip()
+            msg = re.sub(r"[\n\t]", "", msg)
+            if as_sql:
+                # the impl produces soft tabs,
+                # so search for blocks of 4 spaces
+                msg = re.sub(r"    ", "", msg)
+                msg = re.sub(r"\;\n*$", "", msg)
+
+            self.lines.append(msg)
+
+        def flush(self):
+            pass
+
+    buf = buffer_()
+
+    class ctx(MigrationContext):
+        def get_buf(self):
+            return buf
+
+        def clear_assertions(self):
+            buf.lines[:] = []
+
+        def assert_(self, *sql):
+            # TODO: make this more flexible about
+            # whitespace and such
+            eq_(buf.lines, [re.sub(r"[\n\t]", "", s) for s in sql])
+
+        def assert_contains(self, sql):
+            for stmt in buf.lines:
+                if re.sub(r"[\n\t]", "", sql) in stmt:
+                    return
+            assert False, "Could not locate fragment %r in %r" % (
+                sql,
+                buf.lines,
+            )
+
+    if as_sql:
+        opts["as_sql"] = as_sql
+    if literal_binds:
+        opts["literal_binds"] = literal_binds
+    if not sqla_14 and dialect == "mariadb":
+        ctx_dialect = _get_dialect("mysql")
+        ctx_dialect.server_version_info = (10, 4, 0, "MariaDB")
+
+    else:
+        ctx_dialect = _get_dialect(dialect)
+    if native_boolean is not None:
+        ctx_dialect.supports_native_boolean = native_boolean
+        # this is new as of SQLAlchemy 1.2.7 and is used by SQL Server,
+        # which breaks assumptions in the alembic test suite
+        ctx_dialect.non_native_boolean_check_constraint = True
+    if not as_sql:
+
+        def execute(stmt, *multiparam, **param):
+            if isinstance(stmt, str):
+                stmt = text(stmt)
+            assert stmt.supports_execution
+            sql = str(stmt.compile(dialect=ctx_dialect))
+
+            buf.write(sql)
+
+        connection = mock.Mock(dialect=ctx_dialect, execute=execute)
+    else:
+        opts["output_buffer"] = buf
+        connection = None
+    context = ctx(ctx_dialect, connection, opts)
+
+    alembic.op._proxy = Operations(context)
+    return context
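+
+# a usage sketch (illustrative only): tests drive op.* calls against the
+# fixture's context and assert on the exact SQL that was emitted:
+#
+#     context = op_fixture()
+#     op.drop_column("t1", "c1")
+#     context.assert_("ALTER TABLE t1 DROP COLUMN c1")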
+
+
+class AlterColRoundTripFixture:
+    # since these tests are about syntax, use a more recent SQLAlchemy:
+    # some of the type / server default comparison logic may not work on
+    # older versions, as seems to be the case for SQLAlchemy 1.1 on Oracle
+
+    __requires__ = ("alter_column",)
+
+    def setUp(self):
+        self.conn = config.db.connect()
+        self.ctx = MigrationContext.configure(self.conn)
+        self.op = Operations(self.ctx)
+        self.metadata = MetaData()
+
+    def _compare_type(self, t1, t2):
+        c1 = Column("q", t1)
+        c2 = Column("q", t2)
+        assert not self.ctx.impl.compare_type(
+            c1, c2
+        ), "Type objects %r and %r didn't compare as equivalent" % (t1, t2)
+
+    def _compare_server_default(self, t1, s1, t2, s2):
+        c1 = Column("q", t1, server_default=s1)
+        c2 = Column("q", t2, server_default=s2)
+        assert not self.ctx.impl.compare_server_default(
+            c1, c2, s2, s1
+        ), "server defaults %r and %r didn't compare as equivalent" % (s1, s2)
+
+    def tearDown(self):
+        sqla_compat._safe_rollback_connection_transaction(self.conn)
+        with self.conn.begin():
+            self.metadata.drop_all(self.conn)
+        self.conn.close()
+
+    def _run_alter_col(self, from_, to_, compare=None):
+        column = Column(
+            from_.get("name", "colname"),
+            from_.get("type", String(10)),
+            nullable=from_.get("nullable", True),
+            server_default=from_.get("server_default", None),
+            # comment=from_.get("comment", None)
+        )
+        t = Table("x", self.metadata, column)
+
+        with sqla_compat._ensure_scope_for_ddl(self.conn):
+            t.create(self.conn)
+            insp = inspect(self.conn)
+            old_col = insp.get_columns("x")[0]
+
+            # TODO: conditional comment support
+            self.op.alter_column(
+                "x",
+                column.name,
+                existing_type=column.type,
+                existing_server_default=column.server_default
+                if column.server_default is not None
+                else False,
+                existing_nullable=True if column.nullable else False,
+                # existing_comment=column.comment,
+                nullable=to_.get("nullable", None),
+                # modify_comment=False,
+                server_default=to_.get("server_default", False),
+                new_column_name=to_.get("name", None),
+                type_=to_.get("type", None),
+            )
+
+        insp = inspect(self.conn)
+        new_col = insp.get_columns("x")[0]
+
+        if compare is None:
+            compare = to_
+
+        eq_(
+            new_col["name"],
+            compare["name"] if "name" in compare else column.name,
+        )
+        self._compare_type(
+            new_col["type"], compare.get("type", old_col["type"])
+        )
+        eq_(new_col["nullable"], compare.get("nullable", column.nullable))
+        self._compare_server_default(
+            new_col["type"],
+            new_col.get("default", None),
+            compare.get("type", old_col["type"]),
+            compare["server_default"].text
+            if "server_default" in compare
+            else column.server_default.arg.text
+            if column.server_default is not None
+            else None,
+        )
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/__init__.py b/venv/Lib/site-packages/alembic/testing/plugin/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/plugin/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b126d12b4d067afab9ac5999442613e5653e195
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/plugin/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/__pycache__/bootstrap.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/plugin/__pycache__/bootstrap.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef8fc49fb955ad0f9c9929f5154ad1564896e90a
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/plugin/__pycache__/bootstrap.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/plugin/bootstrap.py b/venv/Lib/site-packages/alembic/testing/plugin/bootstrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4a2c5521847f6d34003b49b2826ae49b1d84c29
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/plugin/bootstrap.py
@@ -0,0 +1,4 @@
+"""
+Bootstrapper for test framework plugins.
+
+"""
diff --git a/venv/Lib/site-packages/alembic/testing/requirements.py b/venv/Lib/site-packages/alembic/testing/requirements.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e07e28ea48a7615ad3a1d8a874873eba97ee148
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/requirements.py
@@ -0,0 +1,210 @@
+from sqlalchemy.testing.requirements import Requirements
+
+from alembic import util
+from alembic.util import sqla_compat
+from ..testing import exclusions
+
+
+class SuiteRequirements(Requirements):
+    @property
+    def schemas(self):
+        """Target database must support external schemas, and have one
+        named 'test_schema'."""
+
+        return exclusions.open()
+
+    @property
+    def autocommit_isolation(self):
+        """target database should support 'AUTOCOMMIT' isolation level"""
+
+        return exclusions.closed()
+
+    @property
+    def materialized_views(self):
+        """needed for sqlalchemy compat"""
+        return exclusions.closed()
+
+    @property
+    def unique_constraint_reflection(self):
+        def doesnt_have_check_uq_constraints(config):
+            from sqlalchemy import inspect
+
+            insp = inspect(config.db)
+            try:
+                insp.get_unique_constraints("x")
+            except NotImplementedError:
+                return True
+            except TypeError:
+                return True
+            except Exception:
+                pass
+            return False
+
+        return exclusions.skip_if(doesnt_have_check_uq_constraints)
+
+    @property
+    def sequences(self):
+        """Target database must support SEQUENCEs."""
+
+        return exclusions.only_if(
+            [lambda config: config.db.dialect.supports_sequences],
+            "no sequence support",
+        )
+
+    @property
+    def foreign_key_match(self):
+        return exclusions.open()
+
+    @property
+    def foreign_key_constraint_reflection(self):
+        return exclusions.open()
+
+    @property
+    def check_constraints_w_enforcement(self):
+        """Target database must support check constraints
+        and also enforce them."""
+
+        return exclusions.open()
+
+    @property
+    def reflects_pk_names(self):
+        return exclusions.closed()
+
+    @property
+    def reflects_fk_options(self):
+        return exclusions.closed()
+
+    @property
+    def sqlalchemy_14(self):
+        return exclusions.skip_if(
+            lambda config: not util.sqla_14,
+            "SQLAlchemy 1.4 or greater required",
+        )
+
+    @property
+    def sqlalchemy_1x(self):
+        return exclusions.skip_if(
+            lambda config: util.sqla_2,
+            "SQLAlchemy 1.x test",
+        )
+
+    @property
+    def sqlalchemy_2(self):
+        return exclusions.skip_if(
+            lambda config: not util.sqla_2,
+            "SQLAlchemy 2.x test",
+        )
+
+    @property
+    def asyncio(self):
+        def go(config):
+            try:
+                import greenlet  # noqa: F401
+            except ImportError:
+                return False
+            else:
+                return True
+
+        return self.sqlalchemy_14 + exclusions.only_if(go)
+
+    @property
+    def comments(self):
+        return exclusions.only_if(
+            lambda config: config.db.dialect.supports_comments
+        )
+
+    @property
+    def alter_column(self):
+        return exclusions.open()
+
+    @property
+    def computed_columns(self):
+        return exclusions.closed()
+
+    @property
+    def computed_columns_api(self):
+        return exclusions.only_if(
+            exclusions.BooleanPredicate(sqla_compat.has_computed)
+        )
+
+    @property
+    def computed_reflects_normally(self):
+        return exclusions.only_if(
+            exclusions.BooleanPredicate(sqla_compat.has_computed_reflection)
+        )
+
+    @property
+    def computed_reflects_as_server_default(self):
+        return exclusions.closed()
+
+    @property
+    def computed_doesnt_reflect_as_server_default(self):
+        return exclusions.closed()
+
+    @property
+    def autoincrement_on_composite_pk(self):
+        return exclusions.closed()
+
+    @property
+    def fk_ondelete_is_reflected(self):
+        return exclusions.closed()
+
+    @property
+    def fk_onupdate_is_reflected(self):
+        return exclusions.closed()
+
+    @property
+    def fk_onupdate(self):
+        return exclusions.open()
+
+    @property
+    def fk_ondelete_restrict(self):
+        return exclusions.open()
+
+    @property
+    def fk_onupdate_restrict(self):
+        return exclusions.open()
+
+    @property
+    def fk_ondelete_noaction(self):
+        return exclusions.open()
+
+    @property
+    def fk_initially(self):
+        return exclusions.closed()
+
+    @property
+    def fk_deferrable(self):
+        return exclusions.closed()
+
+    @property
+    def fk_deferrable_is_reflected(self):
+        return exclusions.closed()
+
+    @property
+    def fk_names(self):
+        return exclusions.open()
+
+    @property
+    def integer_subtype_comparisons(self):
+        return exclusions.open()
+
+    @property
+    def no_name_normalize(self):
+        return exclusions.skip_if(
+            lambda config: config.db.dialect.requires_name_normalize
+        )
+
+    @property
+    def identity_columns(self):
+        return exclusions.closed()
+
+    @property
+    def identity_columns_alter(self):
+        return exclusions.closed()
+
+    @property
+    def identity_columns_api(self):
+        return exclusions.only_if(
+            exclusions.BooleanPredicate(sqla_compat.has_identity)
+        )
diff --git a/venv/Lib/site-packages/alembic/testing/schemacompare.py b/venv/Lib/site-packages/alembic/testing/schemacompare.py
new file mode 100644
index 0000000000000000000000000000000000000000..204cc4ddc15b1457cdbacb2c238a625e19c49100
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/schemacompare.py
@@ -0,0 +1,169 @@
+from itertools import zip_longest
+
+from sqlalchemy import schema
+from sqlalchemy.sql.elements import ClauseList
+
+
+class CompareTable:
+    def __init__(self, table):
+        self.table = table
+
+    def __eq__(self, other):
+        if self.table.name != other.name or self.table.schema != other.schema:
+            return False
+
+        for c1, c2 in zip_longest(self.table.c, other.c):
+            if (c1 is None and c2 is not None) or (
+                c2 is None and c1 is not None
+            ):
+                return False
+            if CompareColumn(c1) != c2:
+                return False
+
+        return True
+
+        # TODO: compare constraints, indexes
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
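+
+# a usage sketch (illustrative only; `expected_table` and `reflected_table`
+# are hypothetical): wrap the expected construct so that eq_() applies the
+# relaxed comparison against a reflected object, e.g.
+#
+#     eq_(CompareTable(expected_table), reflected_table)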
+
+
+class CompareColumn:
+    def __init__(self, column):
+        self.column = column
+
+    def __eq__(self, other):
+        return (
+            self.column.name == other.name
+            and self.column.nullable == other.nullable
+        )
+        # TODO: datatypes etc
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class CompareIndex:
+    def __init__(self, index, name_only=False):
+        self.index = index
+        self.name_only = name_only
+
+    def __eq__(self, other):
+        if self.name_only:
+            return self.index.name == other.name
+        else:
+            return (
+                str(schema.CreateIndex(self.index))
+                == str(schema.CreateIndex(other))
+                and self.index.dialect_kwargs == other.dialect_kwargs
+            )
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        expr = ClauseList(*self.index.expressions)
+        try:
+            expr_str = expr.compile().string
+        except Exception:
+            expr_str = str(expr)
+        return f"<CompareIndex {self.index.name}({expr_str})>"
+
+
+class CompareCheckConstraint:
+    def __init__(self, constraint):
+        self.constraint = constraint
+
+    def __eq__(self, other):
+        return (
+            isinstance(other, schema.CheckConstraint)
+            and self.constraint.name == other.name
+            and (str(self.constraint.sqltext) == str(other.sqltext))
+            and (other.table.name == self.constraint.table.name)
+            and other.table.schema == self.constraint.table.schema
+        )
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class CompareForeignKey:
+    def __init__(self, constraint):
+        self.constraint = constraint
+
+    def __eq__(self, other):
+        r1 = (
+            isinstance(other, schema.ForeignKeyConstraint)
+            and self.constraint.name == other.name
+            and (other.table.name == self.constraint.table.name)
+            and other.table.schema == self.constraint.table.schema
+        )
+        if not r1:
+            return False
+        for c1, c2 in zip_longest(self.constraint.columns, other.columns):
+            if (c1 is None and c2 is not None) or (
+                c2 is None and c1 is not None
+            ):
+                return False
+            if CompareColumn(c1) != c2:
+                return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class ComparePrimaryKey:
+    def __init__(self, constraint):
+        self.constraint = constraint
+
+    def __eq__(self, other):
+        r1 = (
+            isinstance(other, schema.PrimaryKeyConstraint)
+            and self.constraint.name == other.name
+            and (other.table.name == self.constraint.table.name)
+            and other.table.schema == self.constraint.table.schema
+        )
+        if not r1:
+            return False
+
+        for c1, c2 in zip_longest(self.constraint.columns, other.columns):
+            if (c1 is None and c2 is not None) or (
+                c2 is None and c1 is not None
+            ):
+                return False
+            if CompareColumn(c1) != c2:
+                return False
+
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+class CompareUniqueConstraint:
+    def __init__(self, constraint):
+        self.constraint = constraint
+
+    def __eq__(self, other):
+        r1 = (
+            isinstance(other, schema.UniqueConstraint)
+            and self.constraint.name == other.name
+            and (other.table.name == self.constraint.table.name)
+            and other.table.schema == self.constraint.table.schema
+        )
+        if not r1:
+            return False
+
+        for c1, c2 in zip_longest(self.constraint.columns, other.columns):
+            if (c1 is None and c2 is not None) or (
+                c2 is None and c1 is not None
+            ):
+                return False
+            if CompareColumn(c1) != c2:
+                return False
+
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__init__.py b/venv/Lib/site-packages/alembic/testing/suite/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3da498d289ed11e1eb140384db8d601bfcd524aa
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/__init__.py
@@ -0,0 +1,7 @@
+from .test_autogen_comments import *  # noqa
+from .test_autogen_computed import *  # noqa
+from .test_autogen_diffs import *  # noqa
+from .test_autogen_fks import *  # noqa
+from .test_autogen_identity import *  # noqa
+from .test_environment import *  # noqa
+from .test_op import *  # noqa
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8bcd92092e2d2537f42338948a0cb693a24de87a
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/_autogen_fixtures.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/_autogen_fixtures.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8bf0fdb825a1f152462e640079b602203bf26a1
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/_autogen_fixtures.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_comments.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_comments.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c898177f13f0804f3b9843f7cd36caa263ea9801
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_comments.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_computed.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_computed.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e369114c75798d754779a805ffbf7a59c959517
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_computed.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_diffs.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_diffs.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9fc73fc2f0e11cc8c0acd0429a0724e4499412d2
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_diffs.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_fks.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_fks.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e2f4878eb9d40a170897f10e2631c26543d266e
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_fks.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_identity.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_identity.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d28fedac8735ee91e2adcd946279158bcf602da
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_autogen_identity.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_environment.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_environment.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df9caefa201546b298571c2a45d9a42f2a1959f5
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_environment.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_op.cpython-311.pyc b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_op.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5744d7b17efa02ee95868189fae3e08d2bf6be5b
Binary files /dev/null and b/venv/Lib/site-packages/alembic/testing/suite/__pycache__/test_op.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/testing/suite/_autogen_fixtures.py b/venv/Lib/site-packages/alembic/testing/suite/_autogen_fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..d838ebef1068b5cc38a4a18f1b16b6cd00876581
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/_autogen_fixtures.py
@@ -0,0 +1,335 @@
+from __future__ import annotations
+
+from typing import Any
+from typing import Dict
+from typing import Set
+
+from sqlalchemy import CHAR
+from sqlalchemy import CheckConstraint
+from sqlalchemy import Column
+from sqlalchemy import event
+from sqlalchemy import ForeignKey
+from sqlalchemy import Index
+from sqlalchemy import inspect
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import Numeric
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import Text
+from sqlalchemy import text
+from sqlalchemy import UniqueConstraint
+
+from ... import autogenerate
+from ... import util
+from ...autogenerate import api
+from ...ddl.base import _fk_spec
+from ...migration import MigrationContext
+from ...operations import ops
+from ...testing import config
+from ...testing import eq_
+from ...testing.env import clear_staging_env
+from ...testing.env import staging_env
+
+names_in_this_test: Set[Any] = set()
+
+
+@event.listens_for(Table, "after_parent_attach")
+def new_table(table, parent):
+    names_in_this_test.add(table.name)
+
+
+def _default_include_object(obj, name, type_, reflected, compare_to):
+    if type_ == "table":
+        return name in names_in_this_test
+    else:
+        return True
+
+
+_default_object_filters: Any = _default_include_object
+
+_default_name_filters: Any = None
+
+
+class ModelOne:
+    __requires__ = ("unique_constraint_reflection",)
+
+    schema: Any = None
+
+    @classmethod
+    def _get_db_schema(cls):
+        schema = cls.schema
+
+        m = MetaData(schema=schema)
+
+        Table(
+            "user",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50)),
+            Column("a1", Text),
+            Column("pw", String(50)),
+            Index("pw_idx", "pw"),
+        )
+
+        Table(
+            "address",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("email_address", String(100), nullable=False),
+        )
+
+        Table(
+            "order",
+            m,
+            Column("order_id", Integer, primary_key=True),
+            Column(
+                "amount",
+                Numeric(8, 2),
+                nullable=False,
+                server_default=text("0"),
+            ),
+            CheckConstraint("amount >= 0", name="ck_order_amount"),
+        )
+
+        Table(
+            "extra",
+            m,
+            Column("x", CHAR),
+            Column("uid", Integer, ForeignKey("user.id")),
+        )
+
+        return m
+
+    @classmethod
+    def _get_model_schema(cls):
+        schema = cls.schema
+
+        m = MetaData(schema=schema)
+
+        Table(
+            "user",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", Text, server_default="x"),
+        )
+
+        Table(
+            "address",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("email_address", String(100), nullable=False),
+            Column("street", String(50)),
+            UniqueConstraint("email_address", name="uq_email"),
+        )
+
+        Table(
+            "order",
+            m,
+            Column("order_id", Integer, primary_key=True),
+            Column(
+                "amount",
+                Numeric(10, 2),
+                nullable=True,
+                server_default=text("0"),
+            ),
+            Column("user_id", Integer, ForeignKey("user.id")),
+            CheckConstraint("amount > -1", name="ck_order_amount"),
+        )
+
+        Table(
+            "item",
+            m,
+            Column("id", Integer, primary_key=True),
+            Column("description", String(100)),
+            Column("order_id", Integer, ForeignKey("order.order_id")),
+            CheckConstraint("len(description) > 5"),
+        )
+        return m
+
+
+class _ComparesFKs:
+    def _assert_fk_diff(
+        self,
+        diff,
+        type_,
+        source_table,
+        source_columns,
+        target_table,
+        target_columns,
+        name=None,
+        conditional_name=None,
+        source_schema=None,
+        onupdate=None,
+        ondelete=None,
+        initially=None,
+        deferrable=None,
+    ):
+        # the public API for ForeignKeyConstraint was not very rich
+        # in 0.7, 0.8, so here we use the well-known but slightly
+        # private API to get at its elements
+        (
+            fk_source_schema,
+            fk_source_table,
+            fk_source_columns,
+            fk_target_schema,
+            fk_target_table,
+            fk_target_columns,
+            fk_onupdate,
+            fk_ondelete,
+            fk_deferrable,
+            fk_initially,
+        ) = _fk_spec(diff[1])
+
+        eq_(diff[0], type_)
+        eq_(fk_source_table, source_table)
+        eq_(fk_source_columns, source_columns)
+        eq_(fk_target_table, target_table)
+        eq_(fk_source_schema, source_schema)
+        eq_(fk_onupdate, onupdate)
+        eq_(fk_ondelete, ondelete)
+        eq_(fk_initially, initially)
+        eq_(fk_deferrable, deferrable)
+
+        eq_([elem.column.name for elem in diff[1].elements], target_columns)
+        if conditional_name is not None:
+            if conditional_name == "servergenerated":
+                fks = inspect(self.bind).get_foreign_keys(source_table)
+                server_fk_name = fks[0]["name"]
+                eq_(diff[1].name, server_fk_name)
+            else:
+                eq_(diff[1].name, conditional_name)
+        else:
+            eq_(diff[1].name, name)
+
+
+class AutogenTest(_ComparesFKs):
+    def _flatten_diffs(self, diffs):
+        for d in diffs:
+            if isinstance(d, list):
+                yield from self._flatten_diffs(d)
+            else:
+                yield d
+
+    @classmethod
+    def _get_bind(cls):
+        return config.db
+
+    configure_opts: Dict[Any, Any] = {}
+
+    @classmethod
+    def setup_class(cls):
+        staging_env()
+        cls.bind = cls._get_bind()
+        cls.m1 = cls._get_db_schema()
+        cls.m1.create_all(cls.bind)
+        cls.m2 = cls._get_model_schema()
+
+    @classmethod
+    def teardown_class(cls):
+        cls.m1.drop_all(cls.bind)
+        clear_staging_env()
+
+    def setUp(self):
+        self.conn = conn = self.bind.connect()
+        ctx_opts = {
+            "compare_type": True,
+            "compare_server_default": True,
+            "target_metadata": self.m2,
+            "upgrade_token": "upgrades",
+            "downgrade_token": "downgrades",
+            "alembic_module_prefix": "op.",
+            "sqlalchemy_module_prefix": "sa.",
+            "include_object": _default_object_filters,
+            "include_name": _default_name_filters,
+        }
+        if self.configure_opts:
+            ctx_opts.update(self.configure_opts)
+        self.context = context = MigrationContext.configure(
+            connection=conn, opts=ctx_opts
+        )
+
+        self.autogen_context = api.AutogenContext(context, self.m2)
+
+    def tearDown(self):
+        self.conn.close()
+
+    def _update_context(
+        self, object_filters=None, name_filters=None, include_schemas=None
+    ):
+        if include_schemas is not None:
+            self.autogen_context.opts["include_schemas"] = include_schemas
+        if object_filters is not None:
+            self.autogen_context._object_filters = [object_filters]
+        if name_filters is not None:
+            self.autogen_context._name_filters = [name_filters]
+        return self.autogen_context
+
+
+class AutogenFixtureTest(_ComparesFKs):
+    def _fixture(
+        self,
+        m1,
+        m2,
+        include_schemas=False,
+        opts=None,
+        object_filters=_default_object_filters,
+        name_filters=_default_name_filters,
+        return_ops=False,
+        max_identifier_length=None,
+    ):
+        if max_identifier_length:
+            dialect = self.bind.dialect
+            existing_length = dialect.max_identifier_length
+            dialect.max_identifier_length = (
+                dialect._user_defined_max_identifier_length
+            ) = max_identifier_length
+        try:
+            self._alembic_metadata, model_metadata = m1, m2
+            for m in util.to_list(self._alembic_metadata):
+                m.create_all(self.bind)
+
+            with self.bind.connect() as conn:
+                ctx_opts = {
+                    "compare_type": True,
+                    "compare_server_default": True,
+                    "target_metadata": model_metadata,
+                    "upgrade_token": "upgrades",
+                    "downgrade_token": "downgrades",
+                    "alembic_module_prefix": "op.",
+                    "sqlalchemy_module_prefix": "sa.",
+                    "include_object": object_filters,
+                    "include_name": name_filters,
+                    "include_schemas": include_schemas,
+                }
+                if opts:
+                    ctx_opts.update(opts)
+                self.context = context = MigrationContext.configure(
+                    connection=conn, opts=ctx_opts
+                )
+
+                autogen_context = api.AutogenContext(context, model_metadata)
+                uo = ops.UpgradeOps(ops=[])
+                autogenerate._produce_net_changes(autogen_context, uo)
+
+                if return_ops:
+                    return uo
+                else:
+                    return uo.as_diffs()
+        finally:
+            if max_identifier_length:
+                dialect = self.bind.dialect
+                dialect.max_identifier_length = (
+                    dialect._user_defined_max_identifier_length
+                ) = existing_length
+
+    def setUp(self):
+        staging_env()
+        self.bind = config.db
+
+    def tearDown(self):
+        if hasattr(self, "_alembic_metadata"):
+            for m in util.to_list(self._alembic_metadata):
+                m.drop_all(self.bind)
+        clear_staging_env()
diff --git a/venv/Lib/site-packages/alembic/testing/suite/test_autogen_comments.py b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_comments.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ef074f57893180048c6193455b0dd1d507c0603
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_comments.py
@@ -0,0 +1,242 @@
+from sqlalchemy import Column
+from sqlalchemy import Float
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+
+from ._autogen_fixtures import AutogenFixtureTest
+from ...testing import eq_
+from ...testing import mock
+from ...testing import TestBase
+
+
+class AutogenerateCommentsTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+
+    __requires__ = ("comments",)
+
+    def test_existing_table_comment_no_change(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_add_table_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("some_table", m1, Column("test", String(10), primary_key=True))
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "add_table_comment")
+        eq_(diffs[0][1].comment, "this is some table")
+        eq_(diffs[0][2], None)
+
+    def test_remove_table_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        Table("some_table", m2, Column("test", String(10), primary_key=True))
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "remove_table_comment")
+        eq_(diffs[0][1].comment, None)
+
+    def test_alter_table_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            comment="this is some table",
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            comment="this is also some table",
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "add_table_comment")
+        eq_(diffs[0][1].comment, "this is also some table")
+        eq_(diffs[0][2], "this is some table")
+
+    def test_existing_column_comment_no_change(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_add_column_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        diffs = self._fixture(m1, m2)
+        eq_(
+            diffs,
+            [
+                [
+                    (
+                        "modify_comment",
+                        None,
+                        "some_table",
+                        "amount",
+                        {
+                            "existing_nullable": True,
+                            "existing_type": mock.ANY,
+                            "existing_server_default": False,
+                        },
+                        None,
+                        "the amount",
+                    )
+                ]
+            ],
+        )
+
+    def test_remove_column_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float),
+        )
+
+        diffs = self._fixture(m1, m2)
+        eq_(
+            diffs,
+            [
+                [
+                    (
+                        "modify_comment",
+                        None,
+                        "some_table",
+                        "amount",
+                        {
+                            "existing_nullable": True,
+                            "existing_type": mock.ANY,
+                            "existing_server_default": False,
+                        },
+                        "the amount",
+                        None,
+                    )
+                ]
+            ],
+        )
+
+    def test_alter_column_comment(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the amount"),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+            Column("amount", Float, comment="the adjusted amount"),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(
+            diffs,
+            [
+                [
+                    (
+                        "modify_comment",
+                        None,
+                        "some_table",
+                        "amount",
+                        {
+                            "existing_nullable": True,
+                            "existing_type": mock.ANY,
+                            "existing_server_default": False,
+                        },
+                        "the amount",
+                        "the adjusted amount",
+                    )
+                ]
+            ],
+        )
diff --git a/venv/Lib/site-packages/alembic/testing/suite/test_autogen_computed.py b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_computed.py
new file mode 100644
index 0000000000000000000000000000000000000000..01a89a1fe85422cc5c4dcfca92d7571040b97d16
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_computed.py
@@ -0,0 +1,203 @@
+import sqlalchemy as sa
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+
+from ._autogen_fixtures import AutogenFixtureTest
+from ... import testing
+from ...testing import config
+from ...testing import eq_
+from ...testing import exclusions
+from ...testing import is_
+from ...testing import is_true
+from ...testing import mock
+from ...testing import TestBase
+
+
+class AutogenerateComputedTest(AutogenFixtureTest, TestBase):
+    __requires__ = ("computed_columns",)
+    __backend__ = True
+
+    def test_add_computed_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("user", m1, Column("id", Integer, primary_key=True))
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("foo", Integer, sa.Computed("5")),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "add_column")
+        eq_(diffs[0][2], "user")
+        eq_(diffs[0][3].name, "foo")
+        c = diffs[0][3].computed
+
+        is_true(isinstance(c, sa.Computed))
+        is_(c.persisted, None)
+        eq_(str(c.sqltext), "5")
+
+    def test_remove_computed_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("foo", Integer, sa.Computed("5")),
+        )
+
+        Table("user", m2, Column("id", Integer, primary_key=True))
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "remove_column")
+        eq_(diffs[0][2], "user")
+        c = diffs[0][3]
+        eq_(c.name, "foo")
+
+        if config.requirements.computed_reflects_normally.enabled:
+            is_true(isinstance(c.computed, sa.Computed))
+        else:
+            is_(c.computed, None)
+
+        if config.requirements.computed_reflects_as_server_default.enabled:
+            is_true(isinstance(c.server_default, sa.DefaultClause))
+            eq_(str(c.server_default.arg.text), "5")
+        elif config.requirements.computed_reflects_normally.enabled:
+            is_true(isinstance(c.computed, sa.Computed))
+        else:
+            is_(c.computed, None)
+
+    @testing.combinations(
+        lambda: (None, sa.Computed("bar*5")),
+        (lambda: (sa.Computed("bar*5"), None)),
+        lambda: (
+            sa.Computed("bar*5"),
+            sa.Computed("bar * 42", persisted=True),
+        ),
+        lambda: (sa.Computed("bar*5"), sa.Computed("bar * 42")),
+    )
+    @config.requirements.computed_reflects_normally
+    def test_cant_change_computed_warning(self, test_case):
+        arg_before, arg_after = testing.resolve_lambda(test_case, **locals())
+        m1 = MetaData()
+        m2 = MetaData()
+
+        arg_before = [] if arg_before is None else [arg_before]
+        arg_after = [] if arg_after is None else [arg_after]
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, *arg_before),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, *arg_after),
+        )
+
+        with mock.patch("alembic.util.warn") as mock_warn:
+            diffs = self._fixture(m1, m2)
+
+        eq_(
+            mock_warn.mock_calls,
+            [mock.call("Computed default on user.foo cannot be modified")],
+        )
+
+        eq_(list(diffs), [])
+
+    @testing.combinations(
+        lambda: (None, None),
+        lambda: (sa.Computed("5"), sa.Computed("5")),
+        lambda: (sa.Computed("bar*5"), sa.Computed("bar*5")),
+        (
+            lambda: (sa.Computed("bar*5"), None),
+            config.requirements.computed_doesnt_reflect_as_server_default,
+        ),
+    )
+    def test_computed_unchanged(self, test_case):
+        arg_before, arg_after = testing.resolve_lambda(test_case, **locals())
+        m1 = MetaData()
+        m2 = MetaData()
+
+        arg_before = [] if arg_before is None else [arg_before]
+        arg_after = [] if arg_after is None else [arg_after]
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, *arg_before),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, *arg_after),
+        )
+
+        with mock.patch("alembic.util.warn") as mock_warn:
+            diffs = self._fixture(m1, m2)
+        eq_(mock_warn.mock_calls, [])
+
+        eq_(list(diffs), [])
+
+    @config.requirements.computed_reflects_as_server_default
+    def test_remove_computed_default_on_computed(self):
+        """Asserts the current behavior which is that on PG and Oracle,
+        the GENERATED ALWAYS AS is reflected as a server default which we can't
+        tell is actually "computed", so these come out as a modification to
+        the server default.
+
+        """
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer, sa.Computed("bar + 42")),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("bar", Integer),
+            Column("foo", Integer),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0][0], "modify_default")
+        eq_(diffs[0][0][2], "user")
+        eq_(diffs[0][0][3], "foo")
+        old = diffs[0][0][-2]
+        new = diffs[0][0][-1]
+
+        is_(new, None)
+        is_true(isinstance(old, sa.DefaultClause))
+
+        if exclusions.against(config, "postgresql"):
+            eq_(str(old.arg.text), "(bar + 42)")
+        elif exclusions.against(config, "oracle"):
+            eq_(str(old.arg.text), '"BAR"+42')
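+
+
+# A hedged sketch, not part of the suite above: reflection returns
+# dialect-specific default text -- PG parenthesizes the expression while
+# Oracle quotes identifiers and strips whitespace -- so a looser comparison
+# could normalize both forms first (helper name is illustrative):
+def _normalized_default_text(default):
+    # e.g. "(bar + 42)" and '"BAR"+42' both normalize to "bar+42"
+    text = str(default.arg.text)
+    return text.replace(" ", "").replace('"', "").strip("()").lower()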
diff --git a/venv/Lib/site-packages/alembic/testing/suite/test_autogen_diffs.py b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_diffs.py
new file mode 100644
index 0000000000000000000000000000000000000000..75bcd37aeec53d4afb2447a0f7aaf8ab5ef4c160
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_diffs.py
@@ -0,0 +1,273 @@
+from sqlalchemy import BigInteger
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+from sqlalchemy.testing import in_
+
+from ._autogen_fixtures import AutogenFixtureTest
+from ... import testing
+from ...testing import config
+from ...testing import eq_
+from ...testing import is_
+from ...testing import TestBase
+
+
+class AlterColumnTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+
+    @testing.combinations((True,), (False,))
+    @config.requirements.comments
+    def test_all_existings_filled(self, pk):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, primary_key=pk))
+        Table("a", m2, Column("x", Integer, comment="x", primary_key=pk))
+
+        alter_col = self._assert_alter_col(m1, m2, pk)
+        eq_(alter_col.modify_comment, "x")
+
+    @testing.combinations((True,), (False,))
+    @config.requirements.comments
+    def test_all_existings_filled_in_notnull(self, pk):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, nullable=False, primary_key=pk))
+        Table(
+            "a",
+            m2,
+            Column("x", Integer, nullable=False, comment="x", primary_key=pk),
+        )
+
+        self._assert_alter_col(m1, m2, pk, nullable=False)
+
+    @testing.combinations((True,), (False,))
+    @config.requirements.comments
+    def test_all_existings_filled_in_comment(self, pk):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, comment="old", primary_key=pk))
+        Table("a", m2, Column("x", Integer, comment="new", primary_key=pk))
+
+        alter_col = self._assert_alter_col(m1, m2, pk)
+        eq_(alter_col.existing_comment, "old")
+
+    @testing.combinations((True,), (False,))
+    @config.requirements.comments
+    def test_all_existings_filled_in_server_default(self, pk):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a", m1, Column("x", Integer, server_default="5", primary_key=pk)
+        )
+        Table(
+            "a",
+            m2,
+            Column(
+                "x", Integer, server_default="5", comment="new", primary_key=pk
+            ),
+        )
+
+        alter_col = self._assert_alter_col(m1, m2, pk)
+        in_("5", alter_col.existing_server_default.arg.text)
+
+    def _assert_alter_col(self, m1, m2, pk, nullable=None):
+        ops = self._fixture(m1, m2, return_ops=True)
+        modify_table = ops.ops[-1]
+        alter_col = modify_table.ops[0]
+
+        if nullable is None:
+            eq_(alter_col.existing_nullable, not pk)
+        else:
+            eq_(alter_col.existing_nullable, nullable)
+        assert alter_col.existing_type._compare_type_affinity(Integer())
+        return alter_col
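+
+    # Note: with return_ops=True the fixture hands back an UpgradeOps tree
+    # rather than raw diff tuples, hence the drill-down above into the last
+    # ModifyTableOps entry and its first alter-column op.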
+
+
+class AutoincrementTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+    __requires__ = ("integer_subtype_comparisons",)
+
+    def test_alter_column_autoincrement_none(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, nullable=False))
+        Table("a", m2, Column("x", Integer, nullable=True))
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        assert "autoincrement" not in ops.ops[0].ops[0].kw
+
+    def test_alter_column_autoincrement_pk_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("x", Integer, primary_key=True, autoincrement=False),
+        )
+        Table(
+            "a",
+            m2,
+            Column("x", BigInteger, primary_key=True, autoincrement=False),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], False)
+
+    def test_alter_column_autoincrement_pk_implicit_true(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("a", m1, Column("x", Integer, primary_key=True))
+        Table("a", m2, Column("x", BigInteger, primary_key=True))
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], True)
+
+    def test_alter_column_autoincrement_pk_explicit_true(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a", m1, Column("x", Integer, primary_key=True, autoincrement=True)
+        )
+        Table(
+            "a",
+            m2,
+            Column("x", BigInteger, primary_key=True, autoincrement=True),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], True)
+
+    def test_alter_column_autoincrement_nonpk_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer, autoincrement=False),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("x", BigInteger, autoincrement=False),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], False)
+
+    def test_alter_column_autoincrement_nonpk_implicit_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("x", BigInteger),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        assert "autoincrement" not in ops.ops[0].ops[0].kw
+
+    def test_alter_column_autoincrement_nonpk_explicit_true(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True, autoincrement=False),
+            Column("x", Integer, autoincrement=True),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True, autoincrement=False),
+            Column("x", BigInteger, autoincrement=True),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], True)
+
+    def test_alter_column_autoincrement_compositepk_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer, primary_key=True, autoincrement=False),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("x", BigInteger, primary_key=True, autoincrement=False),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], False)
+
+    def test_alter_column_autoincrement_compositepk_implicit_false(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("x", Integer, primary_key=True),
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("x", BigInteger, primary_key=True),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        assert "autoincrement" not in ops.ops[0].ops[0].kw
+
+    @config.requirements.autoincrement_on_composite_pk
+    def test_alter_column_autoincrement_compositepk_explicit_true(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "a",
+            m1,
+            Column("id", Integer, primary_key=True, autoincrement=False),
+            Column("x", Integer, primary_key=True, autoincrement=True),
+            # on SQLA 1.0 and earlier, this being present
+            # trips the "add KEY for the primary key" behavior so that the
+            # AUTO_INCREMENT keyword is accepted by MySQL.  On SQLA 1.1 and
+            # greater, the primary key columns are simply reordered.
+            mysql_engine="InnoDB",
+        )
+        Table(
+            "a",
+            m2,
+            Column("id", Integer, primary_key=True, autoincrement=False),
+            Column("x", BigInteger, primary_key=True, autoincrement=True),
+        )
+
+        ops = self._fixture(m1, m2, return_ops=True)
+        is_(ops.ops[0].ops[0].kw["autoincrement"], True)
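+
+
+# Hedged note on the InnoDB quirk above: MySQL requires an AUTO_INCREMENT
+# column to lead some index.  Per the inline comment, SQLA 1.1+ satisfies
+# this by emitting the autoincrement column first in the composite primary
+# key, roughly (DDL shown is illustrative):
+#
+#     CREATE TABLE a (
+#         id INTEGER NOT NULL,
+#         x INTEGER NOT NULL AUTO_INCREMENT,
+#         PRIMARY KEY (x, id)
+#     ) ENGINE=InnoDB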
diff --git a/venv/Lib/site-packages/alembic/testing/suite/test_autogen_fks.py b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_fks.py
new file mode 100644
index 0000000000000000000000000000000000000000..0240b98d3872bad0d123493a97ccb4b30dbbb709
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_fks.py
@@ -0,0 +1,1190 @@
+from sqlalchemy import Column
+from sqlalchemy import ForeignKeyConstraint
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+
+from ._autogen_fixtures import AutogenFixtureTest
+from ...testing import combinations
+from ...testing import config
+from ...testing import eq_
+from ...testing import mock
+from ...testing import TestBase
+
+
+class AutogenerateForeignKeysTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+    __requires__ = ("foreign_key_constraint_reflection",)
+
+    def test_remove_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+            ForeignKeyConstraint(["test2"], ["some_table.test"]),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["test2"],
+            "some_table",
+            ["test"],
+            conditional_name="servergenerated",
+        )
+
+    def test_add_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+            ForeignKeyConstraint(["test2"], ["some_table.test"]),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0], "add_fk", "user", ["test2"], "some_table", ["test"]
+        )
+
+    def test_no_change(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", Integer),
+            ForeignKeyConstraint(["test2"], ["some_table.id"]),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", Integer),
+            ForeignKeyConstraint(["test2"], ["some_table.id"]),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_no_change_composite_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+            ),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_casing_convention_changed_so_put_drops_first(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("test", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+            ForeignKeyConstraint(["test2"], ["some_table.test"], name="MyFK"),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("test", String(10), primary_key=True),
+        )
+
+        # foreign key autogen currently does not take "name" into account,
+        # so the definition is changed here just to exercise the
+        # add/drop ordering for now.
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("test2", String(10)),
+            ForeignKeyConstraint(["a1"], ["some_table.test"], name="myfk"),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["test2"],
+            "some_table",
+            ["test"],
+            name="MyFK" if config.requirements.fk_names.enabled else None,
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["a1"],
+            "some_table",
+            ["test"],
+            name="myfk",
+        )
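+
+        # Hedged illustration, not something autogen emits: since the name
+        # is not compared, a pure FK rename must be written by hand in a
+        # migration, roughly:
+        #
+        #     op.drop_constraint("MyFK", "user", type_="foreignkey")
+        #     op.create_foreign_key(
+        #         "myfk", "user", "some_table", ["a1"], ["test"]
+        #     )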
+
+    def test_add_composite_fk_with_name(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+                name="fk_test_name",
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+        self._assert_fk_diff(
+            diffs[0],
+            "add_fk",
+            "user",
+            ["other_id_1", "other_id_2"],
+            "some_table",
+            ["id_1", "id_2"],
+            name="fk_test_name",
+        )
+
+    @config.requirements.no_name_normalize
+    def test_remove_composite_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+                name="fk_test_name",
+            ),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("a1", String(10), server_default="x"),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["other_id_1", "other_id_2"],
+            "some_table",
+            ["id_1", "id_2"],
+            conditional_name="fk_test_name",
+        )
+
+    def test_add_fk_colkeys(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), key="tid1", primary_key=True),
+            Column("id_2", String(10), key="tid2", primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("other_id_1", String(10), key="oid1"),
+            Column("other_id_2", String(10), key="oid2"),
+            ForeignKeyConstraint(
+                ["oid1", "oid2"],
+                ["some_table.tid1", "some_table.tid2"],
+                name="fk_test_name",
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "add_fk",
+            "user",
+            ["other_id_1", "other_id_2"],
+            "some_table",
+            ["id_1", "id_2"],
+            name="fk_test_name",
+        )
+
+    def test_no_change_colkeys(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id_1", String(10), primary_key=True),
+            Column("id_2", String(10), primary_key=True),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("other_id_1", String(10)),
+            Column("other_id_2", String(10)),
+            ForeignKeyConstraint(
+                ["other_id_1", "other_id_2"],
+                ["some_table.id_1", "some_table.id_2"],
+            ),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id_1", String(10), key="tid1", primary_key=True),
+            Column("id_2", String(10), key="tid2", primary_key=True),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("other_id_1", String(10), key="oid1"),
+            Column("other_id_2", String(10), key="oid2"),
+            ForeignKeyConstraint(
+                ["oid1", "oid2"], ["some_table.tid1", "some_table.tid2"]
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+
+class IncludeHooksTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+    __requires__ = ("fk_names",)
+
+    @combinations(("object",), ("name",))
+    @config.requirements.no_name_normalize
+    def test_remove_connection_fk(self, hook_type):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        ref = Table(
+            "ref",
+            m1,
+            Column("id", Integer, primary_key=True),
+        )
+        t1 = Table(
+            "t",
+            m1,
+            Column("x", Integer),
+            Column("y", Integer),
+        )
+        t1.append_constraint(
+            ForeignKeyConstraint([t1.c.x], [ref.c.id], name="fk1")
+        )
+        t1.append_constraint(
+            ForeignKeyConstraint([t1.c.y], [ref.c.id], name="fk2")
+        )
+
+        ref = Table(
+            "ref",
+            m2,
+            Column("id", Integer, primary_key=True),
+        )
+        Table(
+            "t",
+            m2,
+            Column("x", Integer),
+            Column("y", Integer),
+        )
+
+        if hook_type == "object":
+
+            def include_object(object_, name, type_, reflected, compare_to):
+                return not (
+                    isinstance(object_, ForeignKeyConstraint)
+                    and type_ == "foreign_key_constraint"
+                    and reflected
+                    and name == "fk1"
+                )
+
+            diffs = self._fixture(m1, m2, object_filters=include_object)
+        elif hook_type == "name":
+
+            def include_name(name, type_, parent_names):
+                if name == "fk1":
+                    if type_ == "index":  # MariaDB thing
+                        return True
+                    eq_(type_, "foreign_key_constraint")
+                    eq_(
+                        parent_names,
+                        {
+                            "schema_name": None,
+                            "table_name": "t",
+                            "schema_qualified_table_name": "t",
+                        },
+                    )
+                    return False
+                else:
+                    return True
+
+            diffs = self._fixture(m1, m2, name_filters=include_name)
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "t",
+            ["y"],
+            "ref",
+            ["id"],
+            conditional_name="fk2",
+        )
+        eq_(len(diffs), 1)
+
+    def test_add_metadata_fk(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "ref",
+            m1,
+            Column("id", Integer, primary_key=True),
+        )
+        Table(
+            "t",
+            m1,
+            Column("x", Integer),
+            Column("y", Integer),
+        )
+
+        ref = Table(
+            "ref",
+            m2,
+            Column("id", Integer, primary_key=True),
+        )
+        t2 = Table(
+            "t",
+            m2,
+            Column("x", Integer),
+            Column("y", Integer),
+        )
+        t2.append_constraint(
+            ForeignKeyConstraint([t2.c.x], [ref.c.id], name="fk1")
+        )
+        t2.append_constraint(
+            ForeignKeyConstraint([t2.c.y], [ref.c.id], name="fk2")
+        )
+
+        def include_object(object_, name, type_, reflected, compare_to):
+            return not (
+                isinstance(object_, ForeignKeyConstraint)
+                and type_ == "foreign_key_constraint"
+                and not reflected
+                and name == "fk1"
+            )
+
+        diffs = self._fixture(m1, m2, object_filters=include_object)
+
+        self._assert_fk_diff(
+            diffs[0], "add_fk", "t", ["y"], "ref", ["id"], name="fk2"
+        )
+        eq_(len(diffs), 1)
+
+    @combinations(("object",), ("name",))
+    @config.requirements.no_name_normalize
+    def test_change_fk(self, hook_type):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        r1a = Table(
+            "ref_a",
+            m1,
+            Column("a", Integer, primary_key=True),
+        )
+        Table(
+            "ref_b",
+            m1,
+            Column("a", Integer, primary_key=True),
+            Column("b", Integer, primary_key=True),
+        )
+        t1 = Table(
+            "t",
+            m1,
+            Column("x", Integer),
+            Column("y", Integer),
+            Column("z", Integer),
+        )
+        t1.append_constraint(
+            ForeignKeyConstraint([t1.c.x], [r1a.c.a], name="fk1")
+        )
+        t1.append_constraint(
+            ForeignKeyConstraint([t1.c.y], [r1a.c.a], name="fk2")
+        )
+
+        Table(
+            "ref_a",
+            m2,
+            Column("a", Integer, primary_key=True),
+        )
+        r2b = Table(
+            "ref_b",
+            m2,
+            Column("a", Integer, primary_key=True),
+            Column("b", Integer, primary_key=True),
+        )
+        t2 = Table(
+            "t",
+            m2,
+            Column("x", Integer),
+            Column("y", Integer),
+            Column("z", Integer),
+        )
+        t2.append_constraint(
+            ForeignKeyConstraint(
+                [t2.c.x, t2.c.z], [r2b.c.a, r2b.c.b], name="fk1"
+            )
+        )
+        t2.append_constraint(
+            ForeignKeyConstraint(
+                [t2.c.y, t2.c.z], [r2b.c.a, r2b.c.b], name="fk2"
+            )
+        )
+
+        if hook_type == "object":
+
+            def include_object(object_, name, type_, reflected, compare_to):
+                return not (
+                    isinstance(object_, ForeignKeyConstraint)
+                    and type_ == "foreign_key_constraint"
+                    and name == "fk1"
+                )
+
+            diffs = self._fixture(m1, m2, object_filters=include_object)
+        elif hook_type == "name":
+
+            def include_name(name, type_, parent_names):
+                if type_ == "index":
+                    return True  # MariaDB thing
+
+                if name == "fk1":
+                    eq_(type_, "foreign_key_constraint")
+                    eq_(
+                        parent_names,
+                        {
+                            "schema_name": None,
+                            "table_name": "t",
+                            "schema_qualified_table_name": "t",
+                        },
+                    )
+                    return False
+                else:
+                    return True
+
+            diffs = self._fixture(m1, m2, name_filters=include_name)
+
+        if hook_type == "object":
+            self._assert_fk_diff(
+                diffs[0], "remove_fk", "t", ["y"], "ref_a", ["a"], name="fk2"
+            )
+            self._assert_fk_diff(
+                diffs[1],
+                "add_fk",
+                "t",
+                ["y", "z"],
+                "ref_b",
+                ["a", "b"],
+                name="fk2",
+            )
+            eq_(len(diffs), 2)
+        elif hook_type == "name":
+            eq_(
+                {(d[0], d[1].name) for d in diffs},
+                {("add_fk", "fk2"), ("add_fk", "fk1"), ("remove_fk", "fk2")},
+            )
+
+
+class AutogenerateFKOptionsTest(AutogenFixtureTest, TestBase):
+    __backend__ = True
+
+    def _fk_opts_fixture(self, old_opts, new_opts):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "some_table",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("tid", Integer),
+            ForeignKeyConstraint(["tid"], ["some_table.id"], **old_opts),
+        )
+
+        Table(
+            "some_table",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("test", String(10)),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, primary_key=True),
+            Column("name", String(50), nullable=False),
+            Column("tid", Integer),
+            ForeignKeyConstraint(["tid"], ["some_table.id"], **new_opts),
+        )
+
+        return self._fixture(m1, m2)
+
+    @config.requirements.fk_ondelete_is_reflected
+    def test_add_ondelete(self):
+        diffs = self._fk_opts_fixture({}, {"ondelete": "cascade"})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            ondelete=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            ondelete="cascade",
+        )
+
+    @config.requirements.fk_ondelete_is_reflected
+    def test_remove_ondelete(self):
+        diffs = self._fk_opts_fixture({"ondelete": "CASCADE"}, {})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            ondelete="CASCADE",
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            ondelete=None,
+        )
+
+    def test_nochange_ondelete(self):
+        """test case sensitivity"""
+        diffs = self._fk_opts_fixture(
+            {"ondelete": "caSCAde"}, {"ondelete": "CasCade"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_onupdate_is_reflected
+    def test_add_onupdate(self):
+        diffs = self._fk_opts_fixture({}, {"onupdate": "cascade"})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="cascade",
+        )
+
+    @config.requirements.fk_onupdate_is_reflected
+    def test_remove_onupdate(self):
+        diffs = self._fk_opts_fixture({"onupdate": "CASCADE"}, {})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="CASCADE",
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=None,
+        )
+
+    @config.requirements.fk_onupdate
+    def test_nochange_onupdate(self):
+        """test case sensitivity"""
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "caSCAde"}, {"onupdate": "CasCade"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_ondelete_restrict
+    def test_nochange_ondelete_restrict(self):
+        """test the RESTRICT option which MySQL doesn't report on"""
+
+        diffs = self._fk_opts_fixture(
+            {"ondelete": "restrict"}, {"ondelete": "restrict"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_onupdate_restrict
+    def test_nochange_onupdate_restrict(self):
+        """test the RESTRICT option which MySQL doesn't report on"""
+
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "restrict"}, {"onupdate": "restrict"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_ondelete_noaction
+    def test_nochange_ondelete_noaction(self):
+        """test the NO ACTION option which generally comes back as None"""
+
+        diffs = self._fk_opts_fixture(
+            {"ondelete": "no action"}, {"ondelete": "no action"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_onupdate
+    def test_nochange_onupdate_noaction(self):
+        """test the NO ACTION option which generally comes back as None"""
+
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "no action"}, {"onupdate": "no action"}
+        )
+        eq_(diffs, [])
+
+    @config.requirements.fk_ondelete_restrict
+    def test_change_ondelete_from_restrict(self):
+        """test the RESTRICT option which MySQL doesn't report on"""
+
+        # note that this is impossible to detect if we change
+        # from RESTRICT to NO ACTION on MySQL.
+        diffs = self._fk_opts_fixture(
+            {"ondelete": "restrict"}, {"ondelete": "cascade"}
+        )
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=None,
+            ondelete=mock.ANY,  # MySQL reports None, PG reports RESTRICT
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=None,
+            ondelete="cascade",
+        )
+
+    @config.requirements.fk_ondelete_restrict
+    def test_change_onupdate_from_restrict(self):
+        """test the RESTRICT option which MySQL doesn't report on"""
+
+        # note that this is impossible to detect if we change
+        # from RESTRICT to NO ACTION on MySQL.
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "restrict"}, {"onupdate": "cascade"}
+        )
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate=mock.ANY,  # MySQL reports None, PG reports RESTRICT
+            ondelete=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="cascade",
+            ondelete=None,
+        )
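+
+        # Hedged note: mock.ANY is required because MySQL omits RESTRICT
+        # from SHOW CREATE TABLE (reflection sees None) while PostgreSQL
+        # reports the literal "RESTRICT", so the reflected "old" value
+        # differs by backend even though the schema is the same.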
+
+    @config.requirements.fk_ondelete_is_reflected
+    @config.requirements.fk_onupdate_is_reflected
+    def test_ondelete_onupdate_combo(self):
+        diffs = self._fk_opts_fixture(
+            {"onupdate": "CASCADE", "ondelete": "SET NULL"},
+            {"onupdate": "RESTRICT", "ondelete": "RESTRICT"},
+        )
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="CASCADE",
+            ondelete="SET NULL",
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            onupdate="RESTRICT",
+            ondelete="RESTRICT",
+        )
+
+    @config.requirements.fk_initially
+    def test_add_initially_deferred(self):
+        diffs = self._fk_opts_fixture({}, {"initially": "deferred"})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially="deferred",
+        )
+
+    @config.requirements.fk_initially
+    def test_remove_initially_deferred(self):
+        diffs = self._fk_opts_fixture({"initially": "deferred"}, {})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially="DEFERRED",
+            deferrable=True,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,
+        )
+
+    @config.requirements.fk_deferrable
+    @config.requirements.fk_initially
+    def test_add_initially_immediate_plus_deferrable(self):
+        diffs = self._fk_opts_fixture(
+            {}, {"initially": "immediate", "deferrable": True}
+        )
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially="immediate",
+            deferrable=True,
+        )
+
+    @config.requirements.fk_deferrable
+    @config.requirements.fk_initially
+    def test_remove_initially_immediate_plus_deferrable(self):
+        diffs = self._fk_opts_fixture(
+            {"initially": "immediate", "deferrable": True}, {}
+        )
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,  # immediate is the default
+            deferrable=True,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            initially=None,
+            deferrable=None,
+        )
+
+    @config.requirements.fk_initially
+    @config.requirements.fk_deferrable
+    def test_add_initially_deferrable_nochange_one(self):
+        diffs = self._fk_opts_fixture(
+            {"deferrable": True, "initially": "immediate"},
+            {"deferrable": True, "initially": "immediate"},
+        )
+
+        eq_(diffs, [])
+
+    @config.requirements.fk_initially
+    @config.requirements.fk_deferrable
+    def test_add_initially_deferrable_nochange_two(self):
+        diffs = self._fk_opts_fixture(
+            {"deferrable": True, "initially": "deferred"},
+            {"deferrable": True, "initially": "deferred"},
+        )
+
+        eq_(diffs, [])
+
+    @config.requirements.fk_initially
+    @config.requirements.fk_deferrable
+    def test_add_initially_deferrable_nochange_three(self):
+        diffs = self._fk_opts_fixture(
+            {"deferrable": None, "initially": "deferred"},
+            {"deferrable": None, "initially": "deferred"},
+        )
+
+        eq_(diffs, [])
+
+    @config.requirements.fk_deferrable
+    def test_add_deferrable(self):
+        diffs = self._fk_opts_fixture({}, {"deferrable": True})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            deferrable=None,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            deferrable=True,
+        )
+
+    @config.requirements.fk_deferrable_is_reflected
+    def test_remove_deferrable(self):
+        diffs = self._fk_opts_fixture({"deferrable": True}, {})
+
+        self._assert_fk_diff(
+            diffs[0],
+            "remove_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            deferrable=True,
+            conditional_name="servergenerated",
+        )
+
+        self._assert_fk_diff(
+            diffs[1],
+            "add_fk",
+            "user",
+            ["tid"],
+            "some_table",
+            ["id"],
+            deferrable=None,
+        )
diff --git a/venv/Lib/site-packages/alembic/testing/suite/test_autogen_identity.py b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_identity.py
new file mode 100644
index 0000000000000000000000000000000000000000..3dee9fc9903f74fd06adfaa837f22af51ec6dcd1
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/test_autogen_identity.py
@@ -0,0 +1,226 @@
+import sqlalchemy as sa
+from sqlalchemy import Column
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+
+from alembic.util import sqla_compat
+from ._autogen_fixtures import AutogenFixtureTest
+from ... import testing
+from ...testing import config
+from ...testing import eq_
+from ...testing import is_true
+from ...testing import TestBase
+
+
+class AutogenerateIdentityTest(AutogenFixtureTest, TestBase):
+    __requires__ = ("identity_columns",)
+    __backend__ = True
+
+    def test_add_identity_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table("user", m1, Column("other", sa.Text))
+
+        Table(
+            "user",
+            m2,
+            Column("other", sa.Text),
+            Column(
+                "id",
+                Integer,
+                sa.Identity(start=5, increment=7),
+                primary_key=True,
+            ),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "add_column")
+        eq_(diffs[0][2], "user")
+        eq_(diffs[0][3].name, "id")
+        i = diffs[0][3].identity
+
+        is_true(isinstance(i, sa.Identity))
+        eq_(i.start, 5)
+        eq_(i.increment, 7)
+
+    def test_remove_identity_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column(
+                "id",
+                Integer,
+                sa.Identity(start=2, increment=3),
+                primary_key=True,
+            ),
+        )
+
+        Table("user", m2)
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs[0][0], "remove_column")
+        eq_(diffs[0][2], "user")
+        c = diffs[0][3]
+        eq_(c.name, "id")
+
+        is_true(isinstance(c.identity, sa.Identity))
+        eq_(c.identity.start, 2)
+        eq_(c.identity.increment, 3)
+
+    def test_no_change_identity_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        for m in (m1, m2):
+            id_ = sa.Identity(start=2)
+            Table("user", m, Column("id", Integer, id_))
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(diffs, [])
+
+    def test_dialect_kwargs_changes(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        if sqla_compat.identity_has_dialect_kwargs:
+            args = {"oracle_on_null": True, "oracle_order": True}
+        else:
+            args = {"on_null": True, "order": True}
+
+        Table("user", m1, Column("id", Integer, sa.Identity(start=2)))
+        id_ = sa.Identity(start=2, **args)
+        Table("user", m2, Column("id", Integer, id_))
+
+        diffs = self._fixture(m1, m2)
+        if config.db.name == "oracle":
+            eq_(len(diffs), 1)
+            eq_(diffs[0][0][0], "modify_default")
+        else:
+            eq_(diffs, [])
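+        # Hedged note: "on_null"/"order" map onto Oracle-only identity
+        # options, so only the Oracle backend sees a real difference here;
+        # on other backends the two tables compare as equal.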
+
+    @testing.combinations(
+        (None, dict(start=2)),
+        (dict(start=2), None),
+        (dict(start=2), dict(start=2, increment=7)),
+        (dict(always=False), dict(always=True)),
+        (
+            dict(start=1, minvalue=0, maxvalue=100, cycle=True),
+            dict(start=1, minvalue=0, maxvalue=100, cycle=False),
+        ),
+        (
+            dict(start=10, increment=3, maxvalue=9999),
+            dict(start=10, increment=1, maxvalue=3333),
+        ),
+    )
+    @config.requirements.identity_columns_alter
+    def test_change_identity(self, before, after):
+        arg_before = (sa.Identity(**before),) if before else ()
+        arg_after = (sa.Identity(**after),) if after else ()
+
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, *arg_before),
+            Column("other", sa.Text),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, *arg_after),
+            Column("other", sa.Text),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(len(diffs[0]), 1)
+        diffs = diffs[0][0]
+        eq_(diffs[0], "modify_default")
+        eq_(diffs[2], "user")
+        eq_(diffs[3], "id")
+        old = diffs[5]
+        new = diffs[6]
+
+        def check(kw, idt):
+            if kw:
+                is_true(isinstance(idt, sa.Identity))
+                for k, v in kw.items():
+                    eq_(getattr(idt, k), v)
+            else:
+                is_true(idt in (None, False))
+
+        check(before, old)
+        check(after, new)
+
+    def test_add_identity_to_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer),
+            Column("other", sa.Text),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer, sa.Identity(start=2, maxvalue=1000)),
+            Column("other", sa.Text),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(len(diffs[0]), 1)
+        diffs = diffs[0][0]
+        eq_(diffs[0], "modify_default")
+        eq_(diffs[2], "user")
+        eq_(diffs[3], "id")
+        eq_(diffs[5], None)
+        added = diffs[6]
+
+        is_true(isinstance(added, sa.Identity))
+        eq_(added.start, 2)
+        eq_(added.maxvalue, 1000)
+
+    def test_remove_identity_from_column(self):
+        m1 = MetaData()
+        m2 = MetaData()
+
+        Table(
+            "user",
+            m1,
+            Column("id", Integer, sa.Identity(start=2, maxvalue=1000)),
+            Column("other", sa.Text),
+        )
+
+        Table(
+            "user",
+            m2,
+            Column("id", Integer),
+            Column("other", sa.Text),
+        )
+
+        diffs = self._fixture(m1, m2)
+
+        eq_(len(diffs[0]), 1)
+        diffs = diffs[0][0]
+        eq_(diffs[0], "modify_default")
+        eq_(diffs[2], "user")
+        eq_(diffs[3], "id")
+        eq_(diffs[6], None)
+        removed = diffs[5]
+
+        is_true(isinstance(removed, sa.Identity))
diff --git a/venv/Lib/site-packages/alembic/testing/suite/test_environment.py b/venv/Lib/site-packages/alembic/testing/suite/test_environment.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c86859ae2cff2b0497593dff6fc547a2487bbe3
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/test_environment.py
@@ -0,0 +1,364 @@
+import io
+
+from ...migration import MigrationContext
+from ...testing import assert_raises
+from ...testing import config
+from ...testing import eq_
+from ...testing import is_
+from ...testing import is_false
+from ...testing import is_not_
+from ...testing import is_true
+from ...testing import ne_
+from ...testing.fixtures import TestBase
+
+
+class MigrationTransactionTest(TestBase):
+    __backend__ = True
+
+    conn = None
+
+    def _fixture(self, opts):
+        self.conn = conn = config.db.connect()
+
+        if opts.get("as_sql", False):
+            self.context = MigrationContext.configure(
+                dialect=conn.dialect, opts=opts
+            )
+            self.context.output_buffer = (
+                self.context.impl.output_buffer
+            ) = io.StringIO()
+        else:
+            self.context = MigrationContext.configure(
+                connection=conn, opts=opts
+            )
+        return self.context
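+
+    # Note: in "as_sql" mode the context and its impl share a StringIO
+    # buffer, so statements are written out as offline SQL rather than
+    # executed; the *_sqlmode tests below assert against that buffer via
+    # _assert_impl_steps().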
+
+    def teardown_method(self):
+        if self.conn:
+            self.conn.close()
+
+    def test_proxy_transaction_rollback(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+
+        is_false(self.conn.in_transaction())
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+        proxy.rollback()
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_commit(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+        proxy.commit()
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_contextmanager_commit(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+        with proxy:
+            pass
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_contextmanager_rollback(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+
+        def go():
+            with proxy:
+                raise Exception("hi")
+
+        assert_raises(Exception, go)
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_contextmanager_explicit_rollback(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+
+        with proxy:
+            is_true(self.conn.in_transaction())
+            proxy.rollback()
+            is_false(self.conn.in_transaction())
+
+        is_false(self.conn.in_transaction())
+
+    def test_proxy_transaction_contextmanager_explicit_commit(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+        proxy = context.begin_transaction(_per_migration=True)
+        is_true(self.conn.in_transaction())
+
+        with proxy:
+            is_true(self.conn.in_transaction())
+            proxy.commit()
+            is_false(self.conn.in_transaction())
+
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_migration_transactional_ddl(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": True}
+        )
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_false(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+            is_false(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_migration_non_transactional_ddl(self):
+        context = self._fixture(
+            {"transaction_per_migration": True, "transactional_ddl": False}
+        )
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_false(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+            is_false(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_all_transactional_ddl(self):
+        context = self._fixture({"transactional_ddl": True})
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_true(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+            is_true(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_all_non_transactional_ddl(self):
+        context = self._fixture({"transactional_ddl": False})
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_false(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+            is_false(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    def test_transaction_per_all_sqlmode(self):
+        context = self._fixture({"as_sql": True})
+
+        context.execute("step 1")
+        with context.begin_transaction():
+            context.execute("step 2")
+            with context.begin_transaction(_per_migration=True):
+                context.execute("step 3")
+
+            context.execute("step 4")
+        context.execute("step 5")
+
+        if context.impl.transactional_ddl:
+            self._assert_impl_steps(
+                "step 1",
+                "BEGIN",
+                "step 2",
+                "step 3",
+                "step 4",
+                "COMMIT",
+                "step 5",
+            )
+        else:
+            self._assert_impl_steps(
+                "step 1", "step 2", "step 3", "step 4", "step 5"
+            )
+
+    def test_transaction_per_migration_sqlmode(self):
+        context = self._fixture(
+            {"as_sql": True, "transaction_per_migration": True}
+        )
+
+        context.execute("step 1")
+        with context.begin_transaction():
+            context.execute("step 2")
+            with context.begin_transaction(_per_migration=True):
+                context.execute("step 3")
+
+            context.execute("step 4")
+        context.execute("step 5")
+
+        if context.impl.transactional_ddl:
+            self._assert_impl_steps(
+                "step 1",
+                "step 2",
+                "BEGIN",
+                "step 3",
+                "COMMIT",
+                "step 4",
+                "step 5",
+            )
+        else:
+            self._assert_impl_steps(
+                "step 1", "step 2", "step 3", "step 4", "step 5"
+            )
+
+    @config.requirements.autocommit_isolation
+    def test_autocommit_block(self):
+        context = self._fixture({"transaction_per_migration": True})
+
+        is_false(self.conn.in_transaction())
+
+        with context.begin_transaction():
+            is_false(self.conn.in_transaction())
+            with context.begin_transaction(_per_migration=True):
+                is_true(self.conn.in_transaction())
+
+                with context.autocommit_block():
+                    # in 1.x, self.conn is separate due to the
+                    # execution_options call.  however for future they are the
+                    # same connection and there is a "transaction" block
+                    # despite autocommit
+                    if self.is_sqlalchemy_future:
+                        is_(context.connection, self.conn)
+                    else:
+                        is_not_(context.connection, self.conn)
+                        is_false(self.conn.in_transaction())
+
+                    eq_(
+                        context.connection._execution_options[
+                            "isolation_level"
+                        ],
+                        "AUTOCOMMIT",
+                    )
+
+                ne_(
+                    context.connection._execution_options.get(
+                        "isolation_level", None
+                    ),
+                    "AUTOCOMMIT",
+                )
+                is_true(self.conn.in_transaction())
+
+            is_false(self.conn.in_transaction())
+        is_false(self.conn.in_transaction())
+
+    @config.requirements.autocommit_isolation
+    def test_autocommit_block_no_transaction(self):
+        context = self._fixture({"transaction_per_migration": True})
+
+        is_false(self.conn.in_transaction())
+
+        with context.autocommit_block():
+            is_true(context.connection.in_transaction())
+
+            # in 1.x, self.conn is separate due to the execution_options
+            # call.  however for future they are the same connection and there
+            # is a "transaction" block despite autocommit
+            if self.is_sqlalchemy_future:
+                is_(context.connection, self.conn)
+            else:
+                is_not_(context.connection, self.conn)
+                is_false(self.conn.in_transaction())
+
+            eq_(
+                context.connection._execution_options["isolation_level"],
+                "AUTOCOMMIT",
+            )
+
+        ne_(
+            context.connection._execution_options.get("isolation_level", None),
+            "AUTOCOMMIT",
+        )
+
+        is_false(self.conn.in_transaction())
+
+    def test_autocommit_block_transactional_ddl_sqlmode(self):
+        context = self._fixture(
+            {
+                "transaction_per_migration": True,
+                "transactional_ddl": True,
+                "as_sql": True,
+            }
+        )
+
+        with context.begin_transaction():
+            context.execute("step 1")
+            with context.begin_transaction(_per_migration=True):
+                context.execute("step 2")
+
+                with context.autocommit_block():
+                    context.execute("step 3")
+
+                context.execute("step 4")
+
+            context.execute("step 5")
+
+        self._assert_impl_steps(
+            "step 1",
+            "BEGIN",
+            "step 2",
+            "COMMIT",
+            "step 3",
+            "BEGIN",
+            "step 4",
+            "COMMIT",
+            "step 5",
+        )
+
+    def test_autocommit_block_nontransactional_ddl_sqlmode(self):
+        context = self._fixture(
+            {
+                "transaction_per_migration": True,
+                "transactional_ddl": False,
+                "as_sql": True,
+            }
+        )
+
+        with context.begin_transaction():
+            context.execute("step 1")
+            with context.begin_transaction(_per_migration=True):
+                context.execute("step 2")
+
+                with context.autocommit_block():
+                    context.execute("step 3")
+
+                context.execute("step 4")
+
+            context.execute("step 5")
+
+        self._assert_impl_steps(
+            "step 1", "step 2", "step 3", "step 4", "step 5"
+        )
+
+    def _assert_impl_steps(self, *steps):
+        to_check = self.context.output_buffer.getvalue()
+
+        self.context.impl.output_buffer = buf = io.StringIO()
+        for step in steps:
+            if step == "BEGIN":
+                self.context.impl.emit_begin()
+            elif step == "COMMIT":
+                self.context.impl.emit_commit()
+            else:
+                self.context.impl._exec(step)
+
+        eq_(to_check, buf.getvalue())
diff --git a/venv/Lib/site-packages/alembic/testing/suite/test_op.py b/venv/Lib/site-packages/alembic/testing/suite/test_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..a63b3f2f9f2a7bfd879e5b06a05f0c6a64277b8f
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/suite/test_op.py
@@ -0,0 +1,42 @@
+"""Test against the builders in the op.* module."""
+
+from sqlalchemy import Column
+from sqlalchemy import event
+from sqlalchemy import Integer
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy.sql import text
+
+from ...testing.fixtures import AlterColRoundTripFixture
+from ...testing.fixtures import TestBase
+
+
+@event.listens_for(Table, "after_parent_attach")
+def _add_cols(table, metadata):
+    if table.name == "tbl_with_auto_appended_column":
+        table.append_column(Column("bat", Integer))
+
+
+class BackendAlterColumnTest(AlterColRoundTripFixture, TestBase):
+    __backend__ = True
+
+    def test_rename_column(self):
+        self._run_alter_col({}, {"name": "newname"})
+
+    def test_modify_type_int_str(self):
+        self._run_alter_col({"type": Integer()}, {"type": String(50)})
+
+    def test_add_server_default_int(self):
+        self._run_alter_col({"type": Integer}, {"server_default": text("5")})
+
+    def test_modify_server_default_int(self):
+        self._run_alter_col(
+            {"type": Integer, "server_default": text("2")},
+            {"server_default": text("5")},
+        )
+
+    def test_modify_nullable_to_non(self):
+        self._run_alter_col({}, {"nullable": False})
+
+    def test_modify_non_nullable_to_nullable(self):
+        self._run_alter_col({"nullable": False}, {"nullable": True})
diff --git a/venv/Lib/site-packages/alembic/testing/util.py b/venv/Lib/site-packages/alembic/testing/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..4517a69f6b5c4ebdc34702005074e83178cc9d95
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/util.py
@@ -0,0 +1,126 @@
+# testing/util.py
+# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+from __future__ import annotations
+
+import types
+from typing import Union
+
+from sqlalchemy.util import inspect_getfullargspec
+
+from ..util import sqla_2
+
+
+def flag_combinations(*combinations):
+    """A facade around @testing.combinations() oriented towards boolean
+    keyword-based arguments.
+
+    Basically generates a nice looking identifier based on the keywords
+    and also sets up the argument names.
+
+    E.g.::
+
+        @testing.flag_combinations(
+            dict(lazy=False, passive=False),
+            dict(lazy=True, passive=False),
+            dict(lazy=False, passive=True),
+            dict(lazy=False, passive=True, raiseload=True),
+        )
+
+
+    would result in::
+
+        @testing.combinations(
+            ('', False, False, False),
+            ('lazy', True, False, False),
+            ('passive', False, True, False),
+            ('passive_raiseload', False, True, True),
+            id_='iaaa',
+            argnames='lazy,passive,raiseload'
+        )
+
+    """
+    from sqlalchemy.testing import config
+
+    keys = set()
+
+    for d in combinations:
+        keys.update(d)
+
+    keys = sorted(keys)
+
+    return config.combinations(
+        *[
+            ("_".join(k for k in keys if d.get(k, False)),)
+            + tuple(d.get(k, False) for k in keys)
+            for d in combinations
+        ],
+        id_="i" + ("a" * len(keys)),
+        argnames=",".join(keys),
+    )
+
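+# A minimal sketch (illustrative only, not part of alembic): the identifier
+# and argument tuple for each flag dict are derived as follows.
+def _demo_flag_tuples():
+    combinations = [dict(lazy=True, passive=False)]
+    keys = sorted({k for d in combinations for k in d})
+    return [
+        ("_".join(k for k in keys if d.get(k, False)),)
+        + tuple(d.get(k, False) for k in keys)
+        for d in combinations
+    ]  # -> [("lazy", True, False)]
+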
+
+def resolve_lambda(__fn, **kw):
+    """Given a no-arg lambda and a namespace, return a new lambda that
+    has all the values filled in.
+
+    This is used so that we can have module-level fixtures that
+    refer to instance-level variables using lambdas.
+
+    """
+
+    pos_args = inspect_getfullargspec(__fn)[0]
+    pass_pos_args = {arg: kw.pop(arg) for arg in pos_args}
+    glb = dict(__fn.__globals__)
+    glb.update(kw)
+    new_fn = types.FunctionType(__fn.__code__, glb)
+    return new_fn(**pass_pos_args)
+
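+# A minimal usage sketch; the names "factor" and "_demo_resolve_lambda" are
+# hypothetical. "factor" is a free variable in the lambda, resolved from the
+# keyword namespace at call time, while "x" is passed positionally.
+def _demo_resolve_lambda():
+    fn = lambda x: x * factor  # noqa: E731,F821 - resolved by resolve_lambda
+    return resolve_lambda(fn, x=10, factor=3)  # -> 30
+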
+
+def metadata_fixture(ddl="function"):
+    """Provide MetaData for a pytest fixture."""
+
+    from sqlalchemy.testing import config
+    from . import fixture_functions
+
+    def decorate(fn):
+        def run_ddl(self):
+            from sqlalchemy import schema
+
+            metadata = self.metadata = schema.MetaData()
+            try:
+                result = fn(self, metadata)
+                metadata.create_all(config.db)
+                # TODO:
+                # somehow get a per-function dml erase fixture here
+                yield result
+            finally:
+                metadata.drop_all(config.db)
+
+        return fixture_functions.fixture(scope=ddl)(run_ddl)
+
+    return decorate
+
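+# A hypothetical sketch of how metadata_fixture is consumed inside a test
+# class; the table name and columns are illustrative only:
+#
+#     @metadata_fixture()
+#     def tables(self, metadata):
+#         from sqlalchemy import Table, Column, Integer
+#         return Table("demo", metadata, Column("id", Integer, primary_key=True))
+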
+
+def _safe_int(value: str) -> Union[int, str]:
+    try:
+        return int(value)
+    except ValueError:
+        return value
+
+
+def testing_engine(url=None, options=None, future=False):
+    from sqlalchemy.testing import config
+    from sqlalchemy.testing.engines import testing_engine
+
+    if not future:
+        future = getattr(config._current.options, "future_engine", False)
+
+    if not sqla_2:
+        kw = {"future": future} if future else {}
+    else:
+        kw = {}
+    return testing_engine(url, options, **kw)
diff --git a/venv/Lib/site-packages/alembic/testing/warnings.py b/venv/Lib/site-packages/alembic/testing/warnings.py
new file mode 100644
index 0000000000000000000000000000000000000000..e87136b85f380dbc741ee7f860740d3423bab383
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/testing/warnings.py
@@ -0,0 +1,40 @@
+# testing/warnings.py
+# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+
+import warnings
+
+from sqlalchemy import exc as sa_exc
+
+from ..util import sqla_14
+
+
+def setup_filters():
+    """Set global warning behavior for the test suite."""
+
+    warnings.resetwarnings()
+
+    warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning)
+    warnings.filterwarnings("error", category=sa_exc.SAWarning)
+
+    # some selected deprecations...
+    warnings.filterwarnings("error", category=DeprecationWarning)
+    if not sqla_14:
+        # 1.3 uses pkg_resources in PluginLoader
+        warnings.filterwarnings(
+            "ignore",
+            "pkg_resources is deprecated as an API",
+            DeprecationWarning,
+        )
+    try:
+        import pytest
+    except ImportError:
+        pass
+    else:
+        warnings.filterwarnings(
+            "once", category=pytest.PytestDeprecationWarning
+        )
diff --git a/venv/Lib/site-packages/alembic/util/__init__.py b/venv/Lib/site-packages/alembic/util/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4724e1f0847c4b8fe942e85285970c117acfa915
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/util/__init__.py
@@ -0,0 +1,35 @@
+from .editor import open_in_editor as open_in_editor
+from .exc import AutogenerateDiffsDetected as AutogenerateDiffsDetected
+from .exc import CommandError as CommandError
+from .langhelpers import _with_legacy_names as _with_legacy_names
+from .langhelpers import asbool as asbool
+from .langhelpers import dedupe_tuple as dedupe_tuple
+from .langhelpers import Dispatcher as Dispatcher
+from .langhelpers import EMPTY_DICT as EMPTY_DICT
+from .langhelpers import immutabledict as immutabledict
+from .langhelpers import memoized_property as memoized_property
+from .langhelpers import ModuleClsProxy as ModuleClsProxy
+from .langhelpers import not_none as not_none
+from .langhelpers import rev_id as rev_id
+from .langhelpers import to_list as to_list
+from .langhelpers import to_tuple as to_tuple
+from .langhelpers import unique_list as unique_list
+from .messaging import err as err
+from .messaging import format_as_comma as format_as_comma
+from .messaging import msg as msg
+from .messaging import obfuscate_url_pw as obfuscate_url_pw
+from .messaging import status as status
+from .messaging import warn as warn
+from .messaging import write_outstream as write_outstream
+from .pyfiles import coerce_resource_to_filename as coerce_resource_to_filename
+from .pyfiles import load_python_file as load_python_file
+from .pyfiles import pyc_file_from_path as pyc_file_from_path
+from .pyfiles import template_to_file as template_to_file
+from .sqla_compat import has_computed as has_computed
+from .sqla_compat import sqla_13 as sqla_13
+from .sqla_compat import sqla_14 as sqla_14
+from .sqla_compat import sqla_2 as sqla_2
+
+
+if not sqla_13:
+    raise CommandError("SQLAlchemy 1.3.0 or greater is required.")
diff --git a/venv/Lib/site-packages/alembic/util/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/alembic/util/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3543cc39baea1b3d509b53868f84595f1f08f900
Binary files /dev/null and b/venv/Lib/site-packages/alembic/util/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/util/__pycache__/compat.cpython-311.pyc b/venv/Lib/site-packages/alembic/util/__pycache__/compat.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b060fb6559233b9557adda15cf5590be772ef1b
Binary files /dev/null and b/venv/Lib/site-packages/alembic/util/__pycache__/compat.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/util/__pycache__/editor.cpython-311.pyc b/venv/Lib/site-packages/alembic/util/__pycache__/editor.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9fd21065d1b879c4ed131ca8c38f32f0c47a772
Binary files /dev/null and b/venv/Lib/site-packages/alembic/util/__pycache__/editor.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/util/__pycache__/exc.cpython-311.pyc b/venv/Lib/site-packages/alembic/util/__pycache__/exc.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc0d8dfb828138c2af318723508864eead478fca
Binary files /dev/null and b/venv/Lib/site-packages/alembic/util/__pycache__/exc.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/util/__pycache__/langhelpers.cpython-311.pyc b/venv/Lib/site-packages/alembic/util/__pycache__/langhelpers.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ac0eb4d8c96b0b4a535a325caee290a92ea8f73
Binary files /dev/null and b/venv/Lib/site-packages/alembic/util/__pycache__/langhelpers.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/util/__pycache__/messaging.cpython-311.pyc b/venv/Lib/site-packages/alembic/util/__pycache__/messaging.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..890fc55a655a5d633e223923cde0d44729ce953e
Binary files /dev/null and b/venv/Lib/site-packages/alembic/util/__pycache__/messaging.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/util/__pycache__/pyfiles.cpython-311.pyc b/venv/Lib/site-packages/alembic/util/__pycache__/pyfiles.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..caaf74af10f49fd4ba14e0d8842bc016a832873b
Binary files /dev/null and b/venv/Lib/site-packages/alembic/util/__pycache__/pyfiles.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/util/__pycache__/sqla_compat.cpython-311.pyc b/venv/Lib/site-packages/alembic/util/__pycache__/sqla_compat.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44e74c387e7737504c9005f7305e069fdbfb5802
Binary files /dev/null and b/venv/Lib/site-packages/alembic/util/__pycache__/sqla_compat.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/alembic/util/compat.py b/venv/Lib/site-packages/alembic/util/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..e185cc417204295070406b9a77231c48b3d6c38e
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/util/compat.py
@@ -0,0 +1,89 @@
+# mypy: no-warn-unused-ignores
+
+from __future__ import annotations
+
+from configparser import ConfigParser
+import io
+import os
+import sys
+import typing
+from typing import Any
+from typing import List
+from typing import Optional
+from typing import Sequence
+from typing import Union
+
+if True:
+    # zimports hack for too-long names
+    from sqlalchemy.util import (  # noqa: F401
+        inspect_getfullargspec as inspect_getfullargspec,
+    )
+    from sqlalchemy.util.compat import (  # noqa: F401
+        inspect_formatargspec as inspect_formatargspec,
+    )
+
+is_posix = os.name == "posix"
+
+py311 = sys.version_info >= (3, 11)
+py310 = sys.version_info >= (3, 10)
+py39 = sys.version_info >= (3, 9)
+
+
+# produce a wrapper that allows encoded text to stream
+# into a given buffer, but doesn't close it.
+# not sure of a more idiomatic approach to this.
+class EncodedIO(io.TextIOWrapper):
+    def close(self) -> None:
+        pass
+
+
+if py39:
+    from importlib import resources as _resources
+
+    importlib_resources = _resources
+    from importlib import metadata as _metadata
+
+    importlib_metadata = _metadata
+    from importlib.metadata import EntryPoint as EntryPoint
+else:
+    import importlib_resources  # type:ignore # noqa
+    import importlib_metadata  # type:ignore # noqa
+    from importlib_metadata import EntryPoint  # type:ignore # noqa
+
+
+def importlib_metadata_get(group: str) -> Sequence[EntryPoint]:
+    ep = importlib_metadata.entry_points()
+    if hasattr(ep, "select"):
+        return ep.select(group=group)
+    else:
+        return ep.get(group, ())  # type: ignore
+
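+# A minimal usage sketch; "console_scripts" is a standard entry-point group,
+# used here purely for illustration.
+def _demo_entry_points():
+    for ep in importlib_metadata_get("console_scripts"):
+        print(ep.name, ep.value)
+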
+
+def formatannotation_fwdref(
+    annotation: Any, base_module: Optional[Any] = None
+) -> str:
+    """vendored from python 3.7"""
+    # copied over _formatannotation from sqlalchemy 2.0
+
+    if isinstance(annotation, str):
+        return annotation
+
+    if getattr(annotation, "__module__", None) == "typing":
+        return repr(annotation).replace("typing.", "").replace("~", "")
+    if isinstance(annotation, type):
+        if annotation.__module__ in ("builtins", base_module):
+            return repr(annotation.__qualname__)
+        return annotation.__module__ + "." + annotation.__qualname__
+    elif isinstance(annotation, typing.TypeVar):
+        return repr(annotation).replace("~", "")
+    return repr(annotation).replace("~", "")
+
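+# Expected renderings, for illustration (assumed from the branches above):
+# formatannotation_fwdref(typing.Optional[int]) -> "Optional[int]"
+# formatannotation_fwdref(int) -> "'int'" (builtin types render quoted)
+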
+
+def read_config_parser(
+    file_config: ConfigParser,
+    file_argument: Sequence[Union[str, os.PathLike[str]]],
+) -> List[str]:
+    if py310:
+        return file_config.read(file_argument, encoding="locale")
+    else:
+        return file_config.read(file_argument)
diff --git a/venv/Lib/site-packages/alembic/util/editor.py b/venv/Lib/site-packages/alembic/util/editor.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1d1557f74c8977efa0b22535f45f44a2c9e2564
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/util/editor.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+import os
+from os.path import exists
+from os.path import join
+from os.path import splitext
+from subprocess import check_call
+from typing import Dict
+from typing import List
+from typing import Mapping
+from typing import Optional
+
+from .compat import is_posix
+from .exc import CommandError
+
+
+def open_in_editor(
+    filename: str, environ: Optional[Dict[str, str]] = None
+) -> None:
+    """
+    Opens the given file in a text editor. If the environment variable
+    ``EDITOR`` (or, failing that, ``VISUAL``) is set, it is used by preference.
+
+    Otherwise, a list of commonly installed editors is tried.
+
+    If no editor matches, an :py:exc:`OSError` is raised.
+
+    :param filename: The filename to open. Will be passed verbatim to the
+        editor command.
+    :param environ: An optional drop-in replacement for ``os.environ``. Used
+        mainly for testing.
+    """
+    env = os.environ if environ is None else environ
+    try:
+        editor = _find_editor(env)
+        check_call([editor, filename])
+    except Exception as exc:
+        raise CommandError("Error executing editor (%s)" % (exc,)) from exc
+
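+# A hypothetical invocation (filename and editor path are illustrative):
+#
+#     open_in_editor("versions/1975ea_add_users.py",
+#                    environ={"EDITOR": "/usr/bin/nano"})
+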
+
+def _find_editor(environ: Mapping[str, str]) -> str:
+    candidates = _default_editors()
+    for i, var in enumerate(("EDITOR", "VISUAL")):
+        if var in environ:
+            user_choice = environ[var]
+            if exists(user_choice):
+                return user_choice
+            if os.sep not in user_choice:
+                candidates.insert(i, user_choice)
+
+    for candidate in candidates:
+        path = _find_executable(candidate, environ)
+        if path is not None:
+            return path
+    raise OSError(
+        "No suitable editor found. Please set the "
+        '"EDITOR" or "VISUAL" environment variables'
+    )
+
+
+def _find_executable(
+    candidate: str, environ: Mapping[str, str]
+) -> Optional[str]:
+    # Assuming this is on the PATH, we need to determine its absolute
+    # location. Otherwise, ``check_call`` will fail
+    if not is_posix and splitext(candidate)[1] != ".exe":
+        candidate += ".exe"
+    for path in environ.get("PATH", "").split(os.pathsep):
+        value = join(path, candidate)
+        if exists(value):
+            return value
+    return None
+
+
+def _default_editors() -> List[str]:
+    # Look for an editor. Prefer the user's choice by env-var, fall back to
+    # most commonly installed editor (nano/vim)
+    if is_posix:
+        return ["sensible-editor", "editor", "nano", "vim", "code"]
+    else:
+        return ["code.exe", "notepad++.exe", "notepad.exe"]
diff --git a/venv/Lib/site-packages/alembic/util/exc.py b/venv/Lib/site-packages/alembic/util/exc.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d0496b1e2967c5bdaca854531a9a2df339e425f
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/util/exc.py
@@ -0,0 +1,6 @@
+class CommandError(Exception):
+    pass
+
+
+class AutogenerateDiffsDetected(CommandError):
+    pass
diff --git a/venv/Lib/site-packages/alembic/util/langhelpers.py b/venv/Lib/site-packages/alembic/util/langhelpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a5bf09a98bba393e5d61a8abf67ba011ab52b71
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/util/langhelpers.py
@@ -0,0 +1,335 @@
+from __future__ import annotations
+
+import collections
+from collections.abc import Iterable
+import textwrap
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Dict
+from typing import List
+from typing import Mapping
+from typing import MutableMapping
+from typing import NoReturn
+from typing import Optional
+from typing import overload
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+import uuid
+import warnings
+
+from sqlalchemy.util import asbool as asbool  # noqa: F401
+from sqlalchemy.util import immutabledict as immutabledict  # noqa: F401
+from sqlalchemy.util import to_list as to_list  # noqa: F401
+from sqlalchemy.util import unique_list as unique_list
+
+from .compat import inspect_getfullargspec
+
+if True:
+    # zimports workaround :(
+    from sqlalchemy.util import (  # noqa: F401
+        memoized_property as memoized_property,
+    )
+
+
+EMPTY_DICT: Mapping[Any, Any] = immutabledict()
+_T = TypeVar("_T", bound=Any)
+
+_C = TypeVar("_C", bound=Callable[..., Any])
+
+
+class _ModuleClsMeta(type):
+    def __setattr__(cls, key: str, value: Callable[..., Any]) -> None:
+        super().__setattr__(key, value)
+        cls._update_module_proxies(key)  # type: ignore
+
+
+class ModuleClsProxy(metaclass=_ModuleClsMeta):
+    """Create module level proxy functions for the
+    methods on a given class.
+
+    The functions will have a signature compatible with
+    the methods.
+
+    """
+
+    _setups: Dict[
+        Type[Any],
+        Tuple[
+            Set[str],
+            List[Tuple[MutableMapping[str, Any], MutableMapping[str, Any]]],
+        ],
+    ] = collections.defaultdict(lambda: (set(), []))
+
+    @classmethod
+    def _update_module_proxies(cls, name: str) -> None:
+        attr_names, modules = cls._setups[cls]
+        for globals_, locals_ in modules:
+            cls._add_proxied_attribute(name, globals_, locals_, attr_names)
+
+    def _install_proxy(self) -> None:
+        attr_names, modules = self._setups[self.__class__]
+        for globals_, locals_ in modules:
+            globals_["_proxy"] = self
+            for attr_name in attr_names:
+                globals_[attr_name] = getattr(self, attr_name)
+
+    def _remove_proxy(self) -> None:
+        attr_names, modules = self._setups[self.__class__]
+        for globals_, locals_ in modules:
+            globals_["_proxy"] = None
+            for attr_name in attr_names:
+                del globals_[attr_name]
+
+    @classmethod
+    def create_module_class_proxy(
+        cls,
+        globals_: MutableMapping[str, Any],
+        locals_: MutableMapping[str, Any],
+    ) -> None:
+        attr_names, modules = cls._setups[cls]
+        modules.append((globals_, locals_))
+        cls._setup_proxy(globals_, locals_, attr_names)
+
+    @classmethod
+    def _setup_proxy(
+        cls,
+        globals_: MutableMapping[str, Any],
+        locals_: MutableMapping[str, Any],
+        attr_names: Set[str],
+    ) -> None:
+        for methname in dir(cls):
+            cls._add_proxied_attribute(methname, globals_, locals_, attr_names)
+
+    @classmethod
+    def _add_proxied_attribute(
+        cls,
+        methname: str,
+        globals_: MutableMapping[str, Any],
+        locals_: MutableMapping[str, Any],
+        attr_names: Set[str],
+    ) -> None:
+        if not methname.startswith("_"):
+            meth = getattr(cls, methname)
+            if callable(meth):
+                locals_[methname] = cls._create_method_proxy(
+                    methname, globals_, locals_
+                )
+            else:
+                attr_names.add(methname)
+
+    @classmethod
+    def _create_method_proxy(
+        cls,
+        name: str,
+        globals_: MutableMapping[str, Any],
+        locals_: MutableMapping[str, Any],
+    ) -> Callable[..., Any]:
+        fn = getattr(cls, name)
+
+        def _name_error(name: str, from_: Exception) -> NoReturn:
+            raise NameError(
+                "Can't invoke function '%s', as the proxy object has "
+                "not yet been "
+                "established for the Alembic '%s' class.  "
+                "Try placing this code inside a callable."
+                % (name, cls.__name__)
+            ) from from_
+
+        globals_["_name_error"] = _name_error
+
+        translations = getattr(fn, "_legacy_translations", [])
+        if translations:
+            spec = inspect_getfullargspec(fn)
+            if spec[0] and spec[0][0] == "self":
+                spec[0].pop(0)
+
+            outer_args = inner_args = "*args, **kw"
+            translate_str = "args, kw = _translate(%r, %r, %r, args, kw)" % (
+                fn.__name__,
+                tuple(spec),
+                translations,
+            )
+
+            def translate(
+                fn_name: str, spec: Any, translations: Any, args: Any, kw: Any
+            ) -> Any:
+                return_kw = {}
+                return_args = []
+
+                for oldname, newname in translations:
+                    if oldname in kw:
+                        warnings.warn(
+                            "Argument %r is now named %r "
+                            "for method %s()." % (oldname, newname, fn_name)
+                        )
+                        return_kw[newname] = kw.pop(oldname)
+                return_kw.update(kw)
+
+                args = list(args)
+                if spec[3]:
+                    pos_only = spec[0][: -len(spec[3])]
+                else:
+                    pos_only = spec[0]
+                for arg in pos_only:
+                    if arg not in return_kw:
+                        try:
+                            return_args.append(args.pop(0))
+                        except IndexError:
+                            raise TypeError(
+                                "missing required positional argument: %s"
+                                % arg
+                            )
+                return_args.extend(args)
+
+                return return_args, return_kw
+
+            globals_["_translate"] = translate
+        else:
+            outer_args = "*args, **kw"
+            inner_args = "*args, **kw"
+            translate_str = ""
+
+        func_text = textwrap.dedent(
+            """\
+        def %(name)s(%(args)s):
+            %(doc)r
+            %(translate)s
+            try:
+                p = _proxy
+            except NameError as ne:
+                _name_error('%(name)s', ne)
+            return _proxy.%(name)s(%(apply_kw)s)
+        """
+            % {
+                "name": name,
+                "translate": translate_str,
+                "args": outer_args,
+                "apply_kw": inner_args,
+                "doc": fn.__doc__,
+            }
+        )
+        lcl: MutableMapping[str, Any] = {}
+
+        exec(func_text, cast("Dict[str, Any]", globals_), lcl)
+        return cast("Callable[..., Any]", lcl[name])
+
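+# A minimal sketch (not from alembic) of the intended pattern: a class's
+# public methods become module-level functions once a proxy is installed.
+# All names below are illustrative.
+#
+#     class MyOps(ModuleClsProxy):
+#         def add_column(self, name): ...
+#
+#     MyOps.create_module_class_proxy(globals(), locals())
+#     MyOps()._install_proxy()  # add_column(...) now works at module level
+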
+
+def _with_legacy_names(translations: Any) -> Any:
+    def decorate(fn: _C) -> _C:
+        fn._legacy_translations = translations  # type: ignore[attr-defined]
+        return fn
+
+    return decorate
+
+
+def rev_id() -> str:
+    return uuid.uuid4().hex[-12:]
+
+
+@overload
+def to_tuple(x: Any, default: Tuple[Any, ...]) -> Tuple[Any, ...]:
+    ...
+
+
+@overload
+def to_tuple(x: None, default: Optional[_T] = ...) -> _T:
+    ...
+
+
+@overload
+def to_tuple(
+    x: Any, default: Optional[Tuple[Any, ...]] = None
+) -> Tuple[Any, ...]:
+    ...
+
+
+def to_tuple(
+    x: Any, default: Optional[Tuple[Any, ...]] = None
+) -> Optional[Tuple[Any, ...]]:
+    if x is None:
+        return default
+    elif isinstance(x, str):
+        return (x,)
+    elif isinstance(x, Iterable):
+        return tuple(x)
+    else:
+        return (x,)
+
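+# Expected behavior, for illustration (note that strings are not iterated):
+# to_tuple("a")      -> ("a",)
+# to_tuple([1, 2])   -> (1, 2)
+# to_tuple(None, ()) -> ()
+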
+
+def dedupe_tuple(tup: Tuple[str, ...]) -> Tuple[str, ...]:
+    return tuple(unique_list(tup))
+
+
+class Dispatcher:
+    def __init__(self, uselist: bool = False) -> None:
+        self._registry: Dict[Tuple[Any, ...], Any] = {}
+        self.uselist = uselist
+
+    def dispatch_for(
+        self, target: Any, qualifier: str = "default"
+    ) -> Callable[[_C], _C]:
+        def decorate(fn: _C) -> _C:
+            if self.uselist:
+                self._registry.setdefault((target, qualifier), []).append(fn)
+            else:
+                assert (target, qualifier) not in self._registry
+                self._registry[(target, qualifier)] = fn
+            return fn
+
+        return decorate
+
+    def dispatch(self, obj: Any, qualifier: str = "default") -> Any:
+        if isinstance(obj, str):
+            targets: Sequence[Any] = [obj]
+        elif isinstance(obj, type):
+            targets = obj.__mro__
+        else:
+            targets = type(obj).__mro__
+
+        for spcls in targets:
+            if qualifier != "default" and (spcls, qualifier) in self._registry:
+                return self._fn_or_list(self._registry[(spcls, qualifier)])
+            elif (spcls, "default") in self._registry:
+                return self._fn_or_list(self._registry[(spcls, "default")])
+        else:
+            raise ValueError("no dispatch function for object: %s" % obj)
+
+    def _fn_or_list(
+        self, fn_or_list: Union[List[Callable[..., Any]], Callable[..., Any]]
+    ) -> Callable[..., Any]:
+        if self.uselist:
+
+            def go(*arg: Any, **kw: Any) -> None:
+                if TYPE_CHECKING:
+                    assert isinstance(fn_or_list, Sequence)
+                for fn in fn_or_list:
+                    fn(*arg, **kw)
+
+            return go
+        else:
+            return fn_or_list  # type: ignore
+
+    def branch(self) -> Dispatcher:
+        """Return a copy of this dispatcher that is independently
+        writable."""
+
+        d = Dispatcher()
+        if self.uselist:
+            d._registry.update(
+                (k, [fn for fn in self._registry[k]]) for k in self._registry
+            )
+        else:
+            d._registry.update(self._registry)
+        return d
+
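+# A minimal sketch (not from alembic) of the Dispatcher protocol: registry
+# keys may be strings (e.g. dialect names) or classes (matched via the MRO).
+def _demo_dispatcher():
+    disp = Dispatcher()
+
+    @disp.dispatch_for("postgresql")
+    def visit_pg(element):
+        return "pg: %s" % element
+
+    return disp.dispatch("postgresql")("users")  # -> "pg: users"
+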
+
+def not_none(value: Optional[_T]) -> _T:
+    assert value is not None
+    return value
diff --git a/venv/Lib/site-packages/alembic/util/messaging.py b/venv/Lib/site-packages/alembic/util/messaging.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f14d597554e281ee098afbbb0f2864cec0fd2e8
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/util/messaging.py
@@ -0,0 +1,115 @@
+from __future__ import annotations
+
+from collections.abc import Iterable
+from contextlib import contextmanager
+import logging
+import sys
+import textwrap
+from typing import Iterator
+from typing import Optional
+from typing import TextIO
+from typing import Union
+import warnings
+
+from sqlalchemy.engine import url
+
+from . import sqla_compat
+
+log = logging.getLogger(__name__)
+
+# disable "no handler found" errors
+logging.getLogger("alembic").addHandler(logging.NullHandler())
+
+
+try:
+    import fcntl
+    import termios
+    import struct
+
+    ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0))
+    _h, TERMWIDTH, _hp, _wp = struct.unpack("HHHH", ioctl)
+    if TERMWIDTH <= 0:  # can occur if running in emacs pseudo-tty
+        TERMWIDTH = None
+except (ImportError, OSError):
+    TERMWIDTH = None
+
+
+def write_outstream(
+    stream: TextIO, *text: Union[str, bytes], quiet: bool = False
+) -> None:
+    if quiet:
+        return
+    encoding = getattr(stream, "encoding", "ascii") or "ascii"
+    for t in text:
+        if not isinstance(t, bytes):
+            t = t.encode(encoding, "replace")
+        t = t.decode(encoding)
+        try:
+            stream.write(t)
+        except OSError:
+            # suppress "broken pipe" errors.
+            # no known way to handle this on Python 3 however
+            # as the exception is "ignored" (noisily) in TextIOWrapper.
+            break
+
+
+@contextmanager
+def status(
+    status_msg: str, newline: bool = False, quiet: bool = False
+) -> Iterator[None]:
+    msg(status_msg + " ...", newline, flush=True, quiet=quiet)
+    try:
+        yield
+    except:
+        if not quiet:
+            write_outstream(sys.stdout, "  FAILED\n")
+        raise
+    else:
+        if not quiet:
+            write_outstream(sys.stdout, "  done\n")
+
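+# A minimal usage sketch (message text is illustrative): wraps a unit of
+# work with " ..." / "done" / "FAILED" console output.
+def _demo_status():
+    with status("Running upgrade"):
+        pass  # prints "Running upgrade ..." then "  done"
+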
+
+def err(message: str, quiet: bool = False) -> None:
+    log.error(message)
+    msg(f"FAILED: {message}", quiet=quiet)
+    sys.exit(-1)
+
+
+def obfuscate_url_pw(input_url: str) -> str:
+    u = url.make_url(input_url)
+    return sqla_compat.url_render_as_string(u, hide_password=True)  # type: ignore  # noqa: E501
+
+
+def warn(msg: str, stacklevel: int = 2) -> None:
+    warnings.warn(msg, UserWarning, stacklevel=stacklevel)
+
+
+def msg(
+    msg: str, newline: bool = True, flush: bool = False, quiet: bool = False
+) -> None:
+    if quiet:
+        return
+    if TERMWIDTH is None:
+        write_outstream(sys.stdout, msg)
+        if newline:
+            write_outstream(sys.stdout, "\n")
+    else:
+        # left indent output lines
+        lines = textwrap.wrap(msg, TERMWIDTH)
+        if len(lines) > 1:
+            for line in lines[0:-1]:
+                write_outstream(sys.stdout, "  ", line, "\n")
+        write_outstream(sys.stdout, "  ", lines[-1], ("\n" if newline else ""))
+    if flush:
+        sys.stdout.flush()
+
+
+def format_as_comma(value: Optional[Union[str, Iterable[str]]]) -> str:
+    if value is None:
+        return ""
+    elif isinstance(value, str):
+        return value
+    elif isinstance(value, Iterable):
+        return ", ".join(value)
+    else:
+        raise ValueError("Don't know how to comma-format %r" % value)
diff --git a/venv/Lib/site-packages/alembic/util/pyfiles.py b/venv/Lib/site-packages/alembic/util/pyfiles.py
new file mode 100644
index 0000000000000000000000000000000000000000..973bd458e5ce615a2b31aa3eeaeec61d6c2f709e
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/util/pyfiles.py
@@ -0,0 +1,114 @@
+from __future__ import annotations
+
+import atexit
+from contextlib import ExitStack
+import importlib
+import importlib.machinery
+import importlib.util
+import os
+import re
+import tempfile
+from types import ModuleType
+from typing import Any
+from typing import Optional
+
+from mako import exceptions
+from mako.template import Template
+
+from . import compat
+from .exc import CommandError
+
+
+def template_to_file(
+    template_file: str, dest: str, output_encoding: str, **kw: Any
+) -> None:
+    template = Template(filename=template_file)
+    try:
+        output = template.render_unicode(**kw).encode(output_encoding)
+    except:
+        with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as ntf:
+            ntf.write(
+                exceptions.text_error_template()
+                .render_unicode()
+                .encode(output_encoding)
+            )
+            fname = ntf.name
+        raise CommandError(
+            "Template rendering failed; see %s for a "
+            "template-oriented traceback." % fname
+        )
+    else:
+        with open(dest, "wb") as f:
+            f.write(output)
+
+
+def coerce_resource_to_filename(fname: str) -> str:
+    """Interpret a filename as either a filesystem location or as a package
+    resource.
+
+    Names that are non-absolute paths and contain a colon
+    are interpreted as resources and coerced to a file location.
+
+    """
+    if not os.path.isabs(fname) and ":" in fname:
+        tokens = fname.split(":")
+
+        # from https://importlib-resources.readthedocs.io/en/latest/migration.html#pkg-resources-resource-filename  # noqa E501
+
+        file_manager = ExitStack()
+        atexit.register(file_manager.close)
+
+        ref = compat.importlib_resources.files(tokens[0])
+        for tok in tokens[1:]:
+            ref = ref / tok
+        fname = file_manager.enter_context(  # type: ignore[assignment]
+            compat.importlib_resources.as_file(ref)
+        )
+    return fname
+
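+# For illustration (package and resource names are hypothetical): a
+# non-absolute name containing a colon is resolved as a package resource,
+# e.g. coerce_resource_to_filename("alembic:templates") yields an on-disk
+# path to the "templates" directory of the installed alembic package.
+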
+
+def pyc_file_from_path(path: str) -> Optional[str]:
+    """Given a python source path, locate the .pyc."""
+
+    candidate = importlib.util.cache_from_source(path)
+    if os.path.exists(candidate):
+        return candidate
+
+    # even for pep3147, fall back to the old way of finding .pyc files,
+    # to support sourceless operation
+    filepath, ext = os.path.splitext(path)
+    for ext in importlib.machinery.BYTECODE_SUFFIXES:
+        if os.path.exists(filepath + ext):
+            return filepath + ext
+    else:
+        return None
+
+
+def load_python_file(dir_: str, filename: str) -> ModuleType:
+    """Load a file from the given path as a Python module."""
+
+    module_id = re.sub(r"\W", "_", filename)
+    path = os.path.join(dir_, filename)
+    _, ext = os.path.splitext(filename)
+    if ext == ".py":
+        if os.path.exists(path):
+            module = load_module_py(module_id, path)
+        else:
+            pyc_path = pyc_file_from_path(path)
+            if pyc_path is None:
+                raise ImportError("Can't find Python file %s" % path)
+            else:
+                module = load_module_py(module_id, pyc_path)
+    elif ext in (".pyc", ".pyo"):
+        module = load_module_py(module_id, path)
+    else:
+        assert False
+    return module
+
+
+def load_module_py(module_id: str, path: str) -> ModuleType:
+    spec = importlib.util.spec_from_file_location(module_id, path)
+    assert spec
+    module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(module)  # type: ignore
+    return module
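+
+# A hypothetical usage sketch (directory and filename are illustrative):
+#
+#     module = load_python_file("migrations/versions", "1975ea_add_users.py")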
diff --git a/venv/Lib/site-packages/alembic/util/sqla_compat.py b/venv/Lib/site-packages/alembic/util/sqla_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..8489c19fac7c163dc9053d2f52606855117d60a6
--- /dev/null
+++ b/venv/Lib/site-packages/alembic/util/sqla_compat.py
@@ -0,0 +1,665 @@
+# mypy: allow-untyped-defs, allow-incomplete-defs, allow-untyped-calls
+# mypy: no-warn-return-any, allow-any-generics
+
+from __future__ import annotations
+
+import contextlib
+import re
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Iterable
+from typing import Iterator
+from typing import Mapping
+from typing import Optional
+from typing import Protocol
+from typing import Set
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from sqlalchemy import __version__
+from sqlalchemy import inspect
+from sqlalchemy import schema
+from sqlalchemy import sql
+from sqlalchemy import types as sqltypes
+from sqlalchemy.engine import url
+from sqlalchemy.schema import CheckConstraint
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import ForeignKeyConstraint
+from sqlalchemy.sql import visitors
+from sqlalchemy.sql.base import DialectKWArgs
+from sqlalchemy.sql.elements import BindParameter
+from sqlalchemy.sql.elements import ColumnClause
+from sqlalchemy.sql.elements import quoted_name
+from sqlalchemy.sql.elements import TextClause
+from sqlalchemy.sql.elements import UnaryExpression
+from sqlalchemy.sql.visitors import traverse
+from typing_extensions import TypeGuard
+
+if TYPE_CHECKING:
+    from sqlalchemy import ClauseElement
+    from sqlalchemy import Index
+    from sqlalchemy import Table
+    from sqlalchemy.engine import Connection
+    from sqlalchemy.engine import Dialect
+    from sqlalchemy.engine import Transaction
+    from sqlalchemy.engine.reflection import Inspector
+    from sqlalchemy.sql.base import ColumnCollection
+    from sqlalchemy.sql.compiler import SQLCompiler
+    from sqlalchemy.sql.dml import Insert
+    from sqlalchemy.sql.elements import ColumnElement
+    from sqlalchemy.sql.schema import Constraint
+    from sqlalchemy.sql.schema import SchemaItem
+    from sqlalchemy.sql.selectable import Select
+    from sqlalchemy.sql.selectable import TableClause
+
+_CE = TypeVar("_CE", bound=Union["ColumnElement[Any]", "SchemaItem"])
+
+
+class _CompilerProtocol(Protocol):
+    def __call__(self, element: Any, compiler: Any, **kw: Any) -> str:
+        ...
+
+
+def _safe_int(value: str) -> Union[int, str]:
+    try:
+        return int(value)
+    except ValueError:
+        return value
+
+
+_vers = tuple(
+    [_safe_int(x) for x in re.findall(r"(\d+|[abc]\d)", __version__)]
+)
+sqla_13 = _vers >= (1, 3)
+sqla_14 = _vers >= (1, 4)
+# https://docs.sqlalchemy.org/en/latest/changelog/changelog_14.html#change-0c6e0cc67dfe6fac5164720e57ef307d
+sqla_14_18 = _vers >= (1, 4, 18)
+sqla_14_26 = _vers >= (1, 4, 26)
+sqla_2 = _vers >= (2,)
+sqlalchemy_version = __version__
+
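+# For illustration: the regex keeps numeric tokens plus pre-release tags, so
+# "1.4.0b2" parses to (1, 4, 0, "b2"), which compares tuple-wise against
+# markers such as (1, 4).
+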
+try:
+    from sqlalchemy.sql.naming import _NONE_NAME as _NONE_NAME  # type: ignore[attr-defined]  # noqa: E501
+except ImportError:
+    from sqlalchemy.sql.elements import _NONE_NAME as _NONE_NAME  # type: ignore  # noqa: E501
+
+
+class _Unsupported:
+    "Placeholder for unsupported SQLAlchemy classes"
+
+
+if TYPE_CHECKING:
+
+    def compiles(
+        element: Type[ClauseElement], *dialects: str
+    ) -> Callable[[_CompilerProtocol], _CompilerProtocol]:
+        ...
+
+else:
+    from sqlalchemy.ext.compiler import compiles
+
+try:
+    from sqlalchemy import Computed as Computed
+except ImportError:
+    if not TYPE_CHECKING:
+
+        class Computed(_Unsupported):
+            pass
+
+    has_computed = False
+    has_computed_reflection = False
+else:
+    has_computed = True
+    has_computed_reflection = _vers >= (1, 3, 16)
+
+try:
+    from sqlalchemy import Identity as Identity
+except ImportError:
+    if not TYPE_CHECKING:
+
+        class Identity(_Unsupported):
+            pass
+
+    has_identity = False
+else:
+    identity_has_dialect_kwargs = issubclass(Identity, DialectKWArgs)
+
+    def _get_identity_options_dict(
+        identity: Union[Identity, schema.Sequence, None],
+        dialect_kwargs: bool = False,
+    ) -> Dict[str, Any]:
+        if identity is None:
+            return {}
+        elif identity_has_dialect_kwargs:
+            as_dict = identity._as_dict()  # type: ignore
+            if dialect_kwargs:
+                assert isinstance(identity, DialectKWArgs)
+                as_dict.update(identity.dialect_kwargs)
+        else:
+            as_dict = {}
+            if isinstance(identity, Identity):
+                # always=None means something different than always=False
+                as_dict["always"] = identity.always
+                if identity.on_null is not None:
+                    as_dict["on_null"] = identity.on_null
+            # attributes common to Identity and Sequence
+            attrs = (
+                "start",
+                "increment",
+                "minvalue",
+                "maxvalue",
+                "nominvalue",
+                "nomaxvalue",
+                "cycle",
+                "cache",
+                "order",
+            )
+            as_dict.update(
+                {
+                    key: getattr(identity, key, None)
+                    for key in attrs
+                    if getattr(identity, key, None) is not None
+                }
+            )
+        return as_dict
+
+    has_identity = True
+
+if sqla_2:
+    from sqlalchemy.sql.base import _NoneName
+else:
+    from sqlalchemy.util import symbol as _NoneName  # type: ignore[assignment]
+
+
+_ConstraintName = Union[None, str, _NoneName]
+
+_ConstraintNameDefined = Union[str, _NoneName]
+
+
+def constraint_name_defined(
+    name: _ConstraintName,
+) -> TypeGuard[_ConstraintNameDefined]:
+    return name is _NONE_NAME or isinstance(name, (str, _NoneName))
+
+
+def constraint_name_string(
+    name: _ConstraintName,
+) -> TypeGuard[str]:
+    return isinstance(name, str)
+
+
+def constraint_name_or_none(
+    name: _ConstraintName,
+) -> Optional[str]:
+    return name if constraint_name_string(name) else None
+
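+# Expected behavior, for illustration:
+# constraint_name_string("fk_user")   -> True
+# constraint_name_or_none(_NONE_NAME) -> None
+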
+
+AUTOINCREMENT_DEFAULT = "auto"
+
+
+@contextlib.contextmanager
+def _ensure_scope_for_ddl(
+    connection: Optional[Connection],
+) -> Iterator[None]:
+    try:
+        in_transaction = connection.in_transaction  # type: ignore[union-attr]
+    except AttributeError:
+        # catch for MockConnection, None
+        in_transaction = None
+
+    # yield outside the catch
+    if in_transaction is None:
+        yield
+    else:
+        if not in_transaction():
+            assert connection is not None
+            with connection.begin():
+                yield
+        else:
+            yield
+
+
+def url_render_as_string(url, hide_password=True):
+    if sqla_14:
+        return url.render_as_string(hide_password=hide_password)
+    else:
+        return url.__to_string__(hide_password=hide_password)
+
+
+def _safe_begin_connection_transaction(
+    connection: Connection,
+) -> Transaction:
+    transaction = _get_connection_transaction(connection)
+    if transaction:
+        return transaction
+    else:
+        return connection.begin()
+
+
+def _safe_commit_connection_transaction(
+    connection: Connection,
+) -> None:
+    transaction = _get_connection_transaction(connection)
+    if transaction:
+        transaction.commit()
+
+
+def _safe_rollback_connection_transaction(
+    connection: Connection,
+) -> None:
+    transaction = _get_connection_transaction(connection)
+    if transaction:
+        transaction.rollback()
+
+
+def _get_connection_in_transaction(connection: Optional[Connection]) -> bool:
+    try:
+        in_transaction = connection.in_transaction  # type: ignore
+    except AttributeError:
+        # catch for MockConnection
+        return False
+    else:
+        return in_transaction()
+
+
+def _idx_table_bound_expressions(idx: Index) -> Iterable[ColumnElement[Any]]:
+    return idx.expressions  # type: ignore
+
+
+def _copy(schema_item: _CE, **kw) -> _CE:
+    if hasattr(schema_item, "_copy"):
+        return schema_item._copy(**kw)
+    else:
+        return schema_item.copy(**kw)  # type: ignore[union-attr]
+
+
+def _get_connection_transaction(
+    connection: Connection,
+) -> Optional[Transaction]:
+    if sqla_14:
+        return connection.get_transaction()
+    else:
+        r = connection._root  # type: ignore[attr-defined]
+        return r._Connection__transaction
+
+
+def _create_url(*arg, **kw) -> url.URL:
+    if hasattr(url.URL, "create"):
+        return url.URL.create(*arg, **kw)
+    else:
+        return url.URL(*arg, **kw)
+
+
+def _connectable_has_table(
+    connectable: Connection, tablename: str, schemaname: Union[str, None]
+) -> bool:
+    if sqla_14:
+        return inspect(connectable).has_table(tablename, schemaname)
+    else:
+        return connectable.dialect.has_table(
+            connectable, tablename, schemaname
+        )
+
+
+def _exec_on_inspector(inspector, statement, **params):
+    if sqla_14:
+        with inspector._operation_context() as conn:
+            return conn.execute(statement, params)
+    else:
+        return inspector.bind.execute(statement, params)
+
+
+def _nullability_might_be_unset(metadata_column):
+    if not sqla_14:
+        return metadata_column.nullable
+    else:
+        from sqlalchemy.sql import schema
+
+        return (
+            metadata_column._user_defined_nullable is schema.NULL_UNSPECIFIED
+        )
+
+
+def _server_default_is_computed(*server_default) -> bool:
+    if not has_computed:
+        return False
+    else:
+        return any(isinstance(sd, Computed) for sd in server_default)
+
+
+def _server_default_is_identity(*server_default) -> bool:
+    if not sqla_14:
+        return False
+    else:
+        return any(isinstance(sd, Identity) for sd in server_default)
+
+
+def _table_for_constraint(constraint: Constraint) -> Table:
+    if isinstance(constraint, ForeignKeyConstraint):
+        table = constraint.parent
+        assert table is not None
+        return table  # type: ignore[return-value]
+    else:
+        return constraint.table
+
+
+def _columns_for_constraint(constraint):
+    if isinstance(constraint, ForeignKeyConstraint):
+        return [fk.parent for fk in constraint.elements]
+    elif isinstance(constraint, CheckConstraint):
+        return _find_columns(constraint.sqltext)
+    else:
+        return list(constraint.columns)
+
+
+def _reflect_table(inspector: Inspector, table: Table) -> None:
+    if sqla_14:
+        return inspector.reflect_table(table, None)
+    else:
+        return inspector.reflecttable(  # type: ignore[attr-defined]
+            table, None
+        )
+
+
+def _resolve_for_variant(type_, dialect):
+    if _type_has_variants(type_):
+        base_type, mapping = _get_variant_mapping(type_)
+        return mapping.get(dialect.name, base_type)
+    else:
+        return type_
+
+
+if hasattr(sqltypes.TypeEngine, "_variant_mapping"):
+
+    def _type_has_variants(type_):
+        return bool(type_._variant_mapping)
+
+    def _get_variant_mapping(type_):
+        return type_, type_._variant_mapping
+
+else:
+
+    def _type_has_variants(type_):
+        return type(type_) is sqltypes.Variant
+
+    def _get_variant_mapping(type_):
+        return type_.impl, type_.mapping
+
+
+def _fk_spec(constraint: ForeignKeyConstraint) -> Any:
+    if TYPE_CHECKING:
+        assert constraint.columns is not None
+        assert constraint.elements is not None
+        assert isinstance(constraint.parent, Table)
+
+    source_columns = [
+        constraint.columns[key].name for key in constraint.column_keys
+    ]
+
+    source_table = constraint.parent.name
+    source_schema = constraint.parent.schema
+    target_schema = constraint.elements[0].column.table.schema
+    target_table = constraint.elements[0].column.table.name
+    target_columns = [element.column.name for element in constraint.elements]
+    ondelete = constraint.ondelete
+    onupdate = constraint.onupdate
+    deferrable = constraint.deferrable
+    initially = constraint.initially
+    return (
+        source_schema,
+        source_table,
+        source_columns,
+        target_schema,
+        target_table,
+        target_columns,
+        onupdate,
+        ondelete,
+        deferrable,
+        initially,
+    )
+
+
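+# Editor's illustrative sketch (not part of Alembic): how _fk_spec flattens a
+# ForeignKeyConstraint into its ten-element description. The table and column
+# names are invented for the example.
+def _demo_fk_spec():  # pragma: no cover - documentation aid
+    from sqlalchemy import (
+        Column,
+        ForeignKeyConstraint,
+        Integer,
+        MetaData,
+        Table,
+    )
+
+    m = MetaData()
+    Table("users", m, Column("id", Integer, primary_key=True))
+    orders = Table(
+        "orders",
+        m,
+        Column("id", Integer, primary_key=True),
+        Column("user_id", Integer),
+        ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
+    )
+    fk = next(iter(orders.foreign_key_constraints))
+    # -> (None, 'orders', ['user_id'], None, 'users', ['id'],
+    #     None, 'CASCADE', None, None)
+    return _fk_spec(fk)
+
+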
+def _fk_is_self_referential(constraint: ForeignKeyConstraint) -> bool:
+    spec = constraint.elements[0]._get_colspec()
+    tokens = spec.split(".")
+    tokens.pop(-1)  # colname
+    tablekey = ".".join(tokens)
+    assert constraint.parent is not None
+    return tablekey == constraint.parent.key
+
+
+def _is_type_bound(constraint: Constraint) -> bool:
+    # this deals with SQLAlchemy #3260: don't copy CHECK constraints
+    # that will be generated by the type itself.
+    return constraint._type_bound
+
+
+def _find_columns(clause):
+    """locate Column objects within the given expression."""
+
+    cols: Set[ColumnElement[Any]] = set()
+    traverse(clause, {}, {"column": cols.add})
+    return cols
+
+
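+# Editor's illustrative sketch (not part of Alembic): _find_columns walks an
+# arbitrary SQL expression tree and collects the Column objects inside it.
+# The column names are invented for the example.
+def _demo_find_columns():  # pragma: no cover - documentation aid
+    from sqlalchemy import Column, Integer
+
+    price = Column("price", Integer)
+    qty = Column("qty", Integer)
+    return _find_columns(price * qty > 100) == {price, qty}  # True
+
+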
+def _remove_column_from_collection(
+    collection: ColumnCollection, column: Union[Column[Any], ColumnClause[Any]]
+) -> None:
+    """remove a column from a ColumnCollection."""
+
+    # workaround for older SQLAlchemy, remove the
+    # same object that's present
+    assert column.key is not None
+    to_remove = collection[column.key]
+
+    # SQLAlchemy 2.0 will use more ReadOnlyColumnCollection
+    # (renamed from ImmutableColumnCollection)
+    if hasattr(collection, "_immutable") or hasattr(collection, "_readonly"):
+        collection._parent.remove(to_remove)
+    else:
+        collection.remove(to_remove)
+
+
+def _textual_index_column(
+    table: Table, text_: Union[str, TextClause, ColumnElement[Any]]
+) -> Union[ColumnElement[Any], Column[Any]]:
+    """a workaround for the Index construct's severe lack of flexibility"""
+    if isinstance(text_, str):
+        c = Column(text_, sqltypes.NULLTYPE)
+        table.append_column(c)
+        return c
+    elif isinstance(text_, TextClause):
+        return _textual_index_element(table, text_)
+    elif isinstance(text_, _textual_index_element):
+        return _textual_index_column(table, text_.text)
+    elif isinstance(text_, sql.ColumnElement):
+        return _copy_expression(text_, table)
+    else:
+        raise ValueError("String or text() construct expected")
+
+
+def _copy_expression(expression: _CE, target_table: Table) -> _CE:
+    def replace(col):
+        if (
+            isinstance(col, Column)
+            and col.table is not None
+            and col.table is not target_table
+        ):
+            if col.name in target_table.c:
+                return target_table.c[col.name]
+            else:
+                c = _copy(col)
+                target_table.append_column(c)
+                return c
+        else:
+            return None
+
+    return visitors.replacement_traverse(  # type: ignore[call-overload]
+        expression, {}, replace
+    )
+
+
+class _textual_index_element(sql.ColumnElement):
+    """Wrap around a sqlalchemy text() construct in such a way that
+    we appear like a column-oriented SQL expression to an Index
+    construct.
+
+    The issue here is that currently the PostgreSQL dialect, the biggest
+    recipient of functional indexes, keys all the index expressions to
+    the corresponding column expressions when rendering CREATE INDEX,
+    so the Index we create here needs to have a .columns collection that
+    is the same length as the .expressions collection.  Ultimately
+    SQLAlchemy should support text() expressions in indexes.
+
+    See SQLAlchemy issue 3174.
+
+    """
+
+    __visit_name__ = "_textual_idx_element"
+
+    def __init__(self, table: Table, text: TextClause) -> None:
+        self.table = table
+        self.text = text
+        self.key = text.text
+        self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE)
+        table.append_column(self.fake_column)
+
+    def get_children(self):
+        return [self.fake_column]
+
+
+@compiles(_textual_index_element)
+def _render_textual_index_column(
+    element: _textual_index_element, compiler: SQLCompiler, **kw
+) -> str:
+    return compiler.process(element.text, **kw)
+
+
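+# Editor's illustrative sketch (not part of Alembic): wrapping a raw text()
+# snippet so it can participate in an Index next to real columns; the wrapper
+# appends a matching placeholder column so the Index's .columns and
+# .expressions collections stay the same length. Names are invented.
+def _demo_textual_index():  # pragma: no cover - documentation aid
+    from sqlalchemy import Column, Index, Integer, MetaData, Table, text
+
+    m = MetaData()
+    t = Table("t", m, Column("x", Integer))
+    return Index("ix_t_lower_x", _textual_index_column(t, text("lower(x)")))
+
+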
+class _literal_bindparam(BindParameter):
+    pass
+
+
+@compiles(_literal_bindparam)
+def _render_literal_bindparam(
+    element: _literal_bindparam, compiler: SQLCompiler, **kw
+) -> str:
+    return compiler.render_literal_bindparam(element, **kw)
+
+
+def _column_kwargs(col: Column) -> Mapping:
+    if sqla_13:
+        return col.kwargs
+    else:
+        return {}
+
+
+def _get_constraint_final_name(
+    constraint: Union[Index, Constraint], dialect: Optional[Dialect]
+) -> Optional[str]:
+    if constraint.name is None:
+        return None
+    assert dialect is not None
+    if sqla_14:
+        # for SQLAlchemy 1.4 we would like to have the option to expand
+        # the use of "deferred" names for constraints as well as to have
+        # some flexibility with "None" name and similar; make use of new
+        # SQLAlchemy API to return what would be the final compiled form of
+        # the name for this dialect.
+        return dialect.identifier_preparer.format_constraint(
+            constraint, _alembic_quote=False
+        )
+    else:
+        # prior to SQLAlchemy 1.4, work around quoting logic to get at the
+        # final compiled name without quotes.
+        if hasattr(constraint.name, "quote"):
+            # might be quoted_name, might be truncated_name, keep it the
+            # same
+            quoted_name_cls: type = type(constraint.name)
+        else:
+            quoted_name_cls = quoted_name
+
+        new_name = quoted_name_cls(str(constraint.name), quote=False)
+        constraint = constraint.__class__(name=new_name)
+
+        if isinstance(constraint, schema.Index):
+            # name should not be quoted.
+            d = dialect.ddl_compiler(dialect, None)  # type: ignore[arg-type]
+            return d._prepared_index_name(constraint)
+        else:
+            # name should not be quoted.
+            return dialect.identifier_preparer.format_constraint(constraint)
+
+
+def _constraint_is_named(
+    constraint: Union[Constraint, Index], dialect: Optional[Dialect]
+) -> bool:
+    if sqla_14:
+        if constraint.name is None:
+            return False
+        assert dialect is not None
+        name = dialect.identifier_preparer.format_constraint(
+            constraint, _alembic_quote=False
+        )
+        return name is not None
+    else:
+        return constraint.name is not None
+
+
+def _is_mariadb(mysql_dialect: Dialect) -> bool:
+    if sqla_14:
+        return mysql_dialect.is_mariadb  # type: ignore[attr-defined]
+    else:
+        return bool(
+            mysql_dialect.server_version_info
+            and mysql_dialect._is_mariadb  # type: ignore[attr-defined]
+        )
+
+
+def _mariadb_normalized_version_info(mysql_dialect):
+    return mysql_dialect._mariadb_normalized_version_info
+
+
+def _insert_inline(table: Union[TableClause, Table]) -> Insert:
+    if sqla_14:
+        return table.insert().inline()
+    else:
+        return table.insert(inline=True)  # type: ignore[call-arg]
+
+
+if sqla_14:
+    from sqlalchemy import create_mock_engine
+
+    # weird mypy workaround
+    from sqlalchemy import select as _sa_select
+
+    _select = _sa_select
+else:
+    from sqlalchemy import create_engine
+
+    def create_mock_engine(url, executor, **kw):  # type: ignore[misc]
+        return create_engine(
+            "postgresql://", strategy="mock", executor=executor
+        )
+
+    def _select(*columns, **kw) -> Select:
+        return sql.select(list(columns), **kw)  # type: ignore[call-overload]
+
+
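+# Editor's illustrative sketch (not part of Alembic): _select papers over the
+# 1.3 list-style and 1.4+ varargs signatures of select(), so callers can use
+# one call shape everywhere.
+def _demo_version_neutral_select(table):  # pragma: no cover - documentation aid
+    return _select(table.c.id).where(table.c.id > 0)
+
+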
+def is_expression_index(index: Index) -> bool:
+    for expr in index.expressions:
+        if is_expression(expr):
+            return True
+    return False
+
+
+def is_expression(expr: Any) -> bool:
+    # unwrap ordering modifiers such as asc()/desc() before inspecting
+    while isinstance(expr, UnaryExpression):
+        expr = expr.element
+    # anything other than a plain, non-literal column reference counts as
+    # an expression for index purposes
+    return not isinstance(expr, ColumnClause) or expr.is_literal
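+
+
+# Editor's illustrative sketch (not part of Alembic): an index on a bare
+# column is not an expression index, while one on a function call is.
+# Names are invented for the example.
+def _demo_is_expression_index():  # pragma: no cover - documentation aid
+    from sqlalchemy import Column, Index, Integer, MetaData, Table, func
+
+    m = MetaData()
+    t = Table("t", m, Column("x", Integer))
+    plain = Index("ix_plain", t.c.x)
+    lowered = Index("ix_lower", func.lower(t.c.x))
+    # -> (False, True)
+    return is_expression_index(plain), is_expression_index(lowered)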
diff --git a/venv/Lib/site-packages/flask_migrate/__init__.py b/venv/Lib/site-packages/flask_migrate/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c61b7f5ebd75b5954c653d5e0ae74b21b036c266
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/__init__.py
@@ -0,0 +1,266 @@
+import argparse
+from functools import wraps
+import logging
+import os
+import sys
+from flask import current_app
+from alembic import __version__ as __alembic_version__
+from alembic.config import Config as AlembicConfig
+from alembic import command
+from alembic.util import CommandError
+
+alembic_version = tuple(int(v) for v in __alembic_version__.split('.')[:3])
+log = logging.getLogger(__name__)
+
+
+class _MigrateConfig(object):
+    def __init__(self, migrate, db, **kwargs):
+        self.migrate = migrate
+        self.db = db
+        self.directory = migrate.directory
+        self.configure_args = kwargs
+
+    @property
+    def metadata(self):
+        """
+        Backwards compatibility: in old releases app.extensions['migrate']
+        was set to db, and env.py accessed app.extensions['migrate'].metadata.
+        """
+        return self.db.metadata
+
+
+class Config(AlembicConfig):
+    def __init__(self, *args, **kwargs):
+        self.template_directory = kwargs.pop('template_directory', None)
+        super().__init__(*args, **kwargs)
+
+    def get_template_directory(self):
+        if self.template_directory:
+            return self.template_directory
+        package_dir = os.path.abspath(os.path.dirname(__file__))
+        return os.path.join(package_dir, 'templates')
+
+
+class Migrate(object):
+    def __init__(self, app=None, db=None, directory='migrations', command='db',
+                 compare_type=True, render_as_batch=True, **kwargs):
+        self.configure_callbacks = []
+        self.db = db
+        self.command = command
+        self.directory = str(directory)
+        self.alembic_ctx_kwargs = kwargs
+        self.alembic_ctx_kwargs['compare_type'] = compare_type
+        self.alembic_ctx_kwargs['render_as_batch'] = render_as_batch
+        if app is not None and db is not None:
+            self.init_app(app, db, directory)
+
+    def init_app(self, app, db=None, directory=None, command=None,
+                 compare_type=None, render_as_batch=None, **kwargs):
+        self.db = db or self.db
+        self.command = command or self.command
+        self.directory = str(directory or self.directory)
+        self.alembic_ctx_kwargs.update(kwargs)
+        if compare_type is not None:
+            self.alembic_ctx_kwargs['compare_type'] = compare_type
+        if render_as_batch is not None:
+            self.alembic_ctx_kwargs['render_as_batch'] = render_as_batch
+        if not hasattr(app, 'extensions'):
+            app.extensions = {}
+        app.extensions['migrate'] = _MigrateConfig(
+            self, self.db, **self.alembic_ctx_kwargs)
+
+        from flask_migrate.cli import db as db_cli_group
+        app.cli.add_command(db_cli_group, name=self.command)
+
+    def configure(self, f):
+        self.configure_callbacks.append(f)
+        return f
+
+    def call_configure_callbacks(self, config):
+        for f in self.configure_callbacks:
+            config = f(config)
+        return config
+
+    def get_config(self, directory=None, x_arg=None, opts=None):
+        if directory is None:
+            directory = self.directory
+        directory = str(directory)
+        config = Config(os.path.join(directory, 'alembic.ini'))
+        config.set_main_option('script_location', directory)
+        if config.cmd_opts is None:
+            config.cmd_opts = argparse.Namespace()
+        for opt in opts or []:
+            setattr(config.cmd_opts, opt, True)
+        if not hasattr(config.cmd_opts, 'x'):
+            if x_arg is not None:
+                setattr(config.cmd_opts, 'x', [])
+                if isinstance(x_arg, (list, tuple)):
+                    for x in x_arg:
+                        config.cmd_opts.x.append(x)
+                else:
+                    config.cmd_opts.x.append(x_arg)
+            else:
+                setattr(config.cmd_opts, 'x', None)
+        return self.call_configure_callbacks(config)
+
+
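+# Editor's illustrative sketch (not part of Flask-Migrate): typical wiring in
+# a small application; the configuration values are invented for the example.
+def _demo_init_app():  # pragma: no cover - documentation aid
+    from flask import Flask
+    from flask_sqlalchemy import SQLAlchemy
+
+    app = Flask(__name__)
+    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
+    db = SQLAlchemy(app)
+    # registers the 'db' command group on app.cli and records the settings
+    # in app.extensions['migrate'] for env.py to read
+    Migrate(app, db)
+    return app
+
+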
+def catch_errors(f):
+    @wraps(f)
+    def wrapped(*args, **kwargs):
+        try:
+            f(*args, **kwargs)
+        except (CommandError, RuntimeError) as exc:
+            log.error('Error: ' + str(exc))
+            sys.exit(1)
+    return wrapped
+
+
+@catch_errors
+def list_templates():
+    """List available templates."""
+    config = Config()
+    config.print_stdout("Available templates:\n")
+    for tempname in sorted(os.listdir(config.get_template_directory())):
+        with open(
+            os.path.join(config.get_template_directory(), tempname, "README")
+        ) as readme:
+            synopsis = next(readme).strip()
+        config.print_stdout("%s - %s", tempname, synopsis)
+
+
+@catch_errors
+def init(directory=None, multidb=False, template=None, package=False):
+    """Creates a new migration repository"""
+    if directory is None:
+        directory = current_app.extensions['migrate'].directory
+    template_directory = None
+    if template is not None and ('/' in template or '\\' in template):
+        template_directory, template = os.path.split(template)
+    config = Config(template_directory=template_directory)
+    config.set_main_option('script_location', directory)
+    config.config_file_name = os.path.join(directory, 'alembic.ini')
+    config = current_app.extensions['migrate'].\
+        migrate.call_configure_callbacks(config)
+    if multidb and template is None:
+        template = 'flask-multidb'
+    elif template is None:
+        template = 'flask'
+    command.init(config, directory, template=template, package=package)
+
+
+@catch_errors
+def revision(directory=None, message=None, autogenerate=False, sql=False,
+             head='head', splice=False, branch_label=None, version_path=None,
+             rev_id=None):
+    """Create a new revision file."""
+    opts = ['autogenerate'] if autogenerate else None
+    config = current_app.extensions['migrate'].migrate.get_config(
+        directory, opts=opts)
+    command.revision(config, message, autogenerate=autogenerate, sql=sql,
+                     head=head, splice=splice, branch_label=branch_label,
+                     version_path=version_path, rev_id=rev_id)
+
+
+@catch_errors
+def migrate(directory=None, message=None, sql=False, head='head', splice=False,
+            branch_label=None, version_path=None, rev_id=None, x_arg=None):
+    """Alias for 'revision --autogenerate'"""
+    config = current_app.extensions['migrate'].migrate.get_config(
+        directory, opts=['autogenerate'], x_arg=x_arg)
+    command.revision(config, message, autogenerate=True, sql=sql,
+                     head=head, splice=splice, branch_label=branch_label,
+                     version_path=version_path, rev_id=rev_id)
+
+
+@catch_errors
+def edit(directory=None, revision='current'):
+    """Edit current revision."""
+    if alembic_version >= (0, 8, 0):
+        config = current_app.extensions['migrate'].migrate.get_config(
+            directory)
+        command.edit(config, revision)
+    else:
+        raise RuntimeError('Alembic 0.8.0 or greater is required')
+
+
+@catch_errors
+def merge(directory=None, revisions='', message=None, branch_label=None,
+          rev_id=None):
+    """Merge two revisions together.  Creates a new migration file"""
+    config = current_app.extensions['migrate'].migrate.get_config(directory)
+    command.merge(config, revisions, message=message,
+                  branch_label=branch_label, rev_id=rev_id)
+
+
+@catch_errors
+def upgrade(directory=None, revision='head', sql=False, tag=None, x_arg=None):
+    """Upgrade to a later version"""
+    config = current_app.extensions['migrate'].migrate.get_config(directory,
+                                                                  x_arg=x_arg)
+    command.upgrade(config, revision, sql=sql, tag=tag)
+
+
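+# Editor's illustrative sketch (not part of Flask-Migrate): the same entry
+# point can be driven programmatically, e.g. to apply migrations at startup.
+def _demo_programmatic_upgrade(app):  # pragma: no cover - documentation aid
+    with app.app_context():
+        upgrade()  # applies every pending revision up to 'head'
+
+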
+@catch_errors
+def downgrade(directory=None, revision='-1', sql=False, tag=None, x_arg=None):
+    """Revert to a previous version"""
+    config = current_app.extensions['migrate'].migrate.get_config(directory,
+                                                                  x_arg=x_arg)
+    if sql and revision == '-1':
+        revision = 'head:-1'
+    command.downgrade(config, revision, sql=sql, tag=tag)
+
+
+@catch_errors
+def show(directory=None, revision='head'):
+    """Show the revision denoted by the given symbol."""
+    config = current_app.extensions['migrate'].migrate.get_config(directory)
+    command.show(config, revision)
+
+
+@catch_errors
+def history(directory=None, rev_range=None, verbose=False,
+            indicate_current=False):
+    """List changeset scripts in chronological order."""
+    config = current_app.extensions['migrate'].migrate.get_config(directory)
+    if alembic_version >= (0, 9, 9):
+        command.history(config, rev_range, verbose=verbose,
+                        indicate_current=indicate_current)
+    else:
+        command.history(config, rev_range, verbose=verbose)
+
+
+@catch_errors
+def heads(directory=None, verbose=False, resolve_dependencies=False):
+    """Show current available heads in the script directory"""
+    config = current_app.extensions['migrate'].migrate.get_config(directory)
+    command.heads(config, verbose=verbose,
+                  resolve_dependencies=resolve_dependencies)
+
+
+@catch_errors
+def branches(directory=None, verbose=False):
+    """Show current branch points"""
+    config = current_app.extensions['migrate'].migrate.get_config(directory)
+    command.branches(config, verbose=verbose)
+
+
+@catch_errors
+def current(directory=None, verbose=False):
+    """Display the current revision for each database."""
+    config = current_app.extensions['migrate'].migrate.get_config(directory)
+    command.current(config, verbose=verbose)
+
+
+@catch_errors
+def stamp(directory=None, revision='head', sql=False, tag=None):
+    """'stamp' the revision table with the given revision; don't run any
+    migrations"""
+    config = current_app.extensions['migrate'].migrate.get_config(directory)
+    command.stamp(config, revision, sql=sql, tag=tag)
+
+
+@catch_errors
+def check(directory=None):
+    """Check if there are any new operations to migrate"""
+    config = current_app.extensions['migrate'].migrate.get_config(directory)
+    command.check(config)
diff --git a/venv/Lib/site-packages/flask_migrate/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/flask_migrate/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3c1da941e045aefe47254381667c2f92c89a4ee
Binary files /dev/null and b/venv/Lib/site-packages/flask_migrate/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_migrate/__pycache__/cli.cpython-311.pyc b/venv/Lib/site-packages/flask_migrate/__pycache__/cli.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..727eaf131582a7423623548c11ae652947b27ee2
Binary files /dev/null and b/venv/Lib/site-packages/flask_migrate/__pycache__/cli.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_migrate/cli.py b/venv/Lib/site-packages/flask_migrate/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..176672c58683db543d72b80e28810016aeb08169
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/cli.py
@@ -0,0 +1,251 @@
+import click
+from flask.cli import with_appcontext
+from flask_migrate import list_templates as _list_templates
+from flask_migrate import init as _init
+from flask_migrate import revision as _revision
+from flask_migrate import migrate as _migrate
+from flask_migrate import edit as _edit
+from flask_migrate import merge as _merge
+from flask_migrate import upgrade as _upgrade
+from flask_migrate import downgrade as _downgrade
+from flask_migrate import show as _show
+from flask_migrate import history as _history
+from flask_migrate import heads as _heads
+from flask_migrate import branches as _branches
+from flask_migrate import current as _current
+from flask_migrate import stamp as _stamp
+from flask_migrate import check as _check
+
+
+@click.group()
+def db():
+    """Perform database migrations."""
+    pass
+
+
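+# Editor's illustrative sketch (not part of Flask-Migrate): once Migrate has
+# registered this group, it is reached as "flask db <command>"; click's test
+# runner can also exercise it directly.
+def _demo_cli_help():  # pragma: no cover - documentation aid
+    from click.testing import CliRunner
+
+    return CliRunner().invoke(db, ['--help']).output
+
+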
+@db.command()
+@with_appcontext
+def list_templates():
+    """List available templates."""
+    _list_templates()
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('--multidb', is_flag=True,
+              help=('Support multiple databases'))
+@click.option('-t', '--template', default=None,
+              help=('Repository template to use (default is "flask")'))
+@click.option('--package', is_flag=True,
+              help=('Write empty __init__.py files to the environment and '
+                    'version locations'))
+@with_appcontext
+def init(directory, multidb, template, package):
+    """Creates a new migration repository."""
+    _init(directory, multidb, template, package)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('-m', '--message', default=None, help='Revision message')
+@click.option('--autogenerate', is_flag=True,
+              help=('Populate revision script with candidate migration '
+                    'operations, based on comparison of database to model'))
+@click.option('--sql', is_flag=True,
+              help=('Don\'t emit SQL to database - dump to standard output '
+                    'instead'))
+@click.option('--head', default='head',
+              help=('Specify head revision or <branchname>@head to base new '
+                    'revision on'))
+@click.option('--splice', is_flag=True,
+              help=('Allow a non-head revision as the "head" to splice onto'))
+@click.option('--branch-label', default=None,
+              help=('Specify a branch label to apply to the new revision'))
+@click.option('--version-path', default=None,
+              help=('Specify specific path from config for version file'))
+@click.option('--rev-id', default=None,
+              help=('Specify a hardcoded revision id instead of generating '
+                    'one'))
+@with_appcontext
+def revision(directory, message, autogenerate, sql, head, splice, branch_label,
+             version_path, rev_id):
+    """Create a new revision file."""
+    _revision(directory, message, autogenerate, sql, head, splice,
+              branch_label, version_path, rev_id)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('-m', '--message', default=None, help='Revision message')
+@click.option('--sql', is_flag=True,
+              help=('Don\'t emit SQL to database - dump to standard output '
+                    'instead'))
+@click.option('--head', default='head',
+              help=('Specify head revision or <branchname>@head to base new '
+                    'revision on'))
+@click.option('--splice', is_flag=True,
+              help=('Allow a non-head revision as the "head" to splice onto'))
+@click.option('--branch-label', default=None,
+              help=('Specify a branch label to apply to the new revision'))
+@click.option('--version-path', default=None,
+              help=('Specify specific path from config for version file'))
+@click.option('--rev-id', default=None,
+              help=('Specify a hardcoded revision id instead of generating '
+                    'one'))
+@click.option('-x', '--x-arg', multiple=True,
+              help='Additional arguments consumed by custom env.py scripts')
+@with_appcontext
+def migrate(directory, message, sql, head, splice, branch_label, version_path,
+            rev_id, x_arg):
+    """Autogenerate a new revision file (Alias for
+    'revision --autogenerate')"""
+    _migrate(directory, message, sql, head, splice, branch_label, version_path,
+             rev_id, x_arg)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.argument('revision', default='head')
+@with_appcontext
+def edit(directory, revision):
+    """Edit a revision file"""
+    _edit(directory, revision)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('-m', '--message', default=None, help='Merge revision message')
+@click.option('--branch-label', default=None,
+              help=('Specify a branch label to apply to the new revision'))
+@click.option('--rev-id', default=None,
+              help=('Specify a hardcoded revision id instead of generating '
+                    'one'))
+@click.argument('revisions', nargs=-1)
+@with_appcontext
+def merge(directory, message, branch_label, rev_id, revisions):
+    """Merge two revisions together, creating a new revision file"""
+    _merge(directory, revisions, message, branch_label, rev_id)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('--sql', is_flag=True,
+              help=('Don\'t emit SQL to database - dump to standard output '
+                    'instead'))
+@click.option('--tag', default=None,
+              help=('Arbitrary "tag" name - can be used by custom env.py '
+                    'scripts'))
+@click.option('-x', '--x-arg', multiple=True,
+              help='Additional arguments consumed by custom env.py scripts')
+@click.argument('revision', default='head')
+@with_appcontext
+def upgrade(directory, sql, tag, x_arg, revision):
+    """Upgrade to a later version"""
+    _upgrade(directory, revision, sql, tag, x_arg)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('--sql', is_flag=True,
+              help=('Don\'t emit SQL to database - dump to standard output '
+                    'instead'))
+@click.option('--tag', default=None,
+              help=('Arbitrary "tag" name - can be used by custom env.py '
+                    'scripts'))
+@click.option('-x', '--x-arg', multiple=True,
+              help='Additional arguments consumed by custom env.py scripts')
+@click.argument('revision', default='-1')
+@with_appcontext
+def downgrade(directory, sql, tag, x_arg, revision):
+    """Revert to a previous version"""
+    _downgrade(directory, revision, sql, tag, x_arg)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.argument('revision', default='head')
+@with_appcontext
+def show(directory, revision):
+    """Show the revision denoted by the given symbol."""
+    _show(directory, revision)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('-r', '--rev-range', default=None,
+              help='Specify a revision range; format is [start]:[end]')
+@click.option('-v', '--verbose', is_flag=True, help='Use more verbose output')
+@click.option('-i', '--indicate-current', is_flag=True,
+              help=('Indicate current version (Alembic 0.9.9 or greater is '
+                    'required)'))
+@with_appcontext
+def history(directory, rev_range, verbose, indicate_current):
+    """List changeset scripts in chronological order."""
+    _history(directory, rev_range, verbose, indicate_current)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('-v', '--verbose', is_flag=True, help='Use more verbose output')
+@click.option('--resolve-dependencies', is_flag=True,
+              help='Treat dependency versions as down revisions')
+@with_appcontext
+def heads(directory, verbose, resolve_dependencies):
+    """Show current available heads in the script directory"""
+    _heads(directory, verbose, resolve_dependencies)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('-v', '--verbose', is_flag=True, help='Use more verbose output')
+@with_appcontext
+def branches(directory, verbose):
+    """Show current branch points"""
+    _branches(directory, verbose)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('-v', '--verbose', is_flag=True, help='Use more verbose output')
+@with_appcontext
+def current(directory, verbose):
+    """Display the current revision for each database."""
+    _current(directory, verbose)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@click.option('--sql', is_flag=True,
+              help=('Don\'t emit SQL to database - dump to standard output '
+                    'instead'))
+@click.option('--tag', default=None,
+              help=('Arbitrary "tag" name - can be used by custom env.py '
+                    'scripts'))
+@click.argument('revision', default='head')
+@with_appcontext
+def stamp(directory, sql, tag, revision):
+    """'stamp' the revision table with the given revision; don't run any
+    migrations"""
+    _stamp(directory, revision, sql, tag)
+
+
+@db.command()
+@click.option('-d', '--directory', default=None,
+              help=('Migration script directory (default is "migrations")'))
+@with_appcontext
+def check(directory):
+    """Check if there are any new operations to migrate"""
+    _check(directory)
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/README b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/README
new file mode 100644
index 0000000000000000000000000000000000000000..02cce84ee2b7916aaa9f5afa8e3f7d177747b057
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/README
@@ -0,0 +1 @@
+Multi-database configuration for aioflask.
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/__pycache__/env.cpython-311.pyc b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/__pycache__/env.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0acffee73d9cfbe3e63ab0b78362b01617cda385
Binary files /dev/null and b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/__pycache__/env.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/alembic.ini.mako b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/alembic.ini.mako
new file mode 100644
index 0000000000000000000000000000000000000000..ec9d45c26a6bb54e833fd4e6ce2de29343894f4b
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/alembic.ini.mako
@@ -0,0 +1,50 @@
+# A generic, multi-database configuration.
+
+[alembic]
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic,flask_migrate
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[logger_flask_migrate]
+level = INFO
+handlers =
+qualname = flask_migrate
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/env.py b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..f14bd6c091bc954c6a5ddcf7f081057f6ffbb432
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/env.py
@@ -0,0 +1,202 @@
+import asyncio
+import logging
+from logging.config import fileConfig
+
+from sqlalchemy import MetaData
+from flask import current_app
+
+from alembic import context
+
+USE_TWOPHASE = False
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This sets up the loggers.
+fileConfig(config.config_file_name)
+logger = logging.getLogger('alembic.env')
+
+
+def get_engine(bind_key=None):
+    try:
+        # this works with Flask-SQLAlchemy<3 and Alchemical
+        return current_app.extensions['migrate'].db.get_engine(bind=bind_key)
+    except (TypeError, AttributeError):
+        # this works with Flask-SQLAlchemy>=3
+        return current_app.extensions['migrate'].db.engines.get(bind_key)
+
+
+def get_engine_url(bind_key=None):
+    try:
+        return get_engine(bind_key).url.render_as_string(
+            hide_password=False).replace('%', '%%')
+    except AttributeError:
+        return str(get_engine(bind_key).url).replace('%', '%%')
+
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+config.set_main_option('sqlalchemy.url', get_engine_url())
+bind_names = []
+if current_app.config.get('SQLALCHEMY_BINDS') is not None:
+    bind_names = list(current_app.config['SQLALCHEMY_BINDS'].keys())
+else:
+    get_bind_names = getattr(current_app.extensions['migrate'].db,
+                             'bind_names', None)
+    if get_bind_names:
+        bind_names = get_bind_names()
+for bind in bind_names:
+    context.config.set_section_option(
+        bind, "sqlalchemy.url", get_engine_url(bind_key=bind))
+target_db = current_app.extensions['migrate'].db
+
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def get_metadata(bind):
+    """Return the metadata for a bind."""
+    if bind == '':
+        bind = None
+    if hasattr(target_db, 'metadatas'):
+        return target_db.metadatas[bind]
+
+    # legacy, less flexible implementation
+    m = MetaData()
+    for t in target_db.metadata.tables.values():
+        if t.info.get('bind_key') == bind:
+            t.tometadata(m)
+    return m
+
+
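+# Editor's note (illustrative, not part of this template): with
+# Flask-SQLAlchemy a model is routed to a named bind via __bind_key__, which
+# lands in Table.info['bind_key'] and is what get_metadata() filters on, e.g.
+#
+#     class AuditLog(db.Model):
+#         __bind_key__ = 'audit'
+#         id = db.Column(db.Integer, primary_key=True)
+
+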
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    # for the --sql use case, run migrations for each URL into
+    # individual files.
+
+    engines = {
+        '': {
+            'url': context.config.get_main_option('sqlalchemy.url')
+        }
+    }
+    for name in bind_names:
+        engines[name] = rec = {}
+        rec['url'] = context.config.get_section_option(name, "sqlalchemy.url")
+
+    for name, rec in engines.items():
+        logger.info("Migrating database %s" % (name or '<default>'))
+        file_ = "%s.sql" % name
+        logger.info("Writing output to %s" % file_)
+        with open(file_, 'w') as buffer:
+            context.configure(
+                url=rec['url'],
+                output_buffer=buffer,
+                target_metadata=get_metadata(name),
+                literal_binds=True,
+            )
+            with context.begin_transaction():
+                context.run_migrations(engine_name=name)
+
+
+def do_run_migrations(_, engines):
+    # this callback is used to prevent an auto-migration from being generated
+    # when there are no changes to the schema
+    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
+    def process_revision_directives(context, revision, directives):
+        if getattr(config.cmd_opts, 'autogenerate', False):
+            script = directives[0]
+            if len(script.upgrade_ops_list) >= len(bind_names) + 1:
+                empty = True
+                for upgrade_ops in script.upgrade_ops_list:
+                    if not upgrade_ops.is_empty():
+                        empty = False
+                if empty:
+                    directives[:] = []
+                    logger.info('No changes in schema detected.')
+
+    conf_args = current_app.extensions['migrate'].configure_args
+    if conf_args.get("process_revision_directives") is None:
+        conf_args["process_revision_directives"] = process_revision_directives
+
+    for name, rec in engines.items():
+        rec['sync_connection'] = conn = rec['connection']._sync_connection()
+        if USE_TWOPHASE:
+            rec['transaction'] = conn.begin_twophase()
+        else:
+            rec['transaction'] = conn.begin()
+
+    try:
+        for name, rec in engines.items():
+            logger.info("Migrating database %s" % (name or '<default>'))
+            context.configure(
+                connection=rec['sync_connection'],
+                upgrade_token="%s_upgrades" % name,
+                downgrade_token="%s_downgrades" % name,
+                target_metadata=get_metadata(name),
+                **conf_args
+            )
+            context.run_migrations(engine_name=name)
+
+        if USE_TWOPHASE:
+            for rec in engines.values():
+                rec['transaction'].prepare()
+
+        for rec in engines.values():
+            rec['transaction'].commit()
+    except:  # noqa: E722
+        for rec in engines.values():
+            rec['transaction'].rollback()
+        raise
+    finally:
+        for rec in engines.values():
+            rec['sync_connection'].close()
+
+
+async def run_migrations_online():
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    # for the direct-to-DB use case, start a transaction on all
+    # engines, then run all migrations, then commit all transactions.
+    engines = {
+        '': {'engine': get_engine()}
+    }
+    for name in bind_names:
+        engines[name] = rec = {}
+        rec['engine'] = get_engine(bind_key=name)
+
+    for name, rec in engines.items():
+        engine = rec['engine']
+        rec['connection'] = await engine.connect().start()
+
+    await engines['']['connection'].run_sync(do_run_migrations, engines)
+
+    for rec in engines.values():
+        await rec['connection'].close()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    asyncio.get_event_loop().run_until_complete(run_migrations_online())
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/script.py.mako b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..3beabc46397a862b61217f3ae5fcb6397a6ca72f
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/aioflask-multidb/script.py.mako
@@ -0,0 +1,53 @@
+<%!
+import re
+
+%>"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade(engine_name):
+    globals()["upgrade_%s" % engine_name]()
+
+
+def downgrade(engine_name):
+    globals()["downgrade_%s" % engine_name]()
+
+<%
+    from flask import current_app
+    bind_names = []
+    if current_app.config.get('SQLALCHEMY_BINDS') is not None:
+        bind_names = list(current_app.config['SQLALCHEMY_BINDS'].keys())
+    else:
+        get_bind_names = getattr(current_app.extensions['migrate'].db, 'bind_names', None)
+        if get_bind_names:
+            bind_names = get_bind_names()
+    db_names = [''] + bind_names
+%>
+
+## generate an "upgrade_<xyz>() / downgrade_<xyz>()" function
+## for each database name in the ini file.
+
+% for db_name in db_names:
+
+def upgrade_${db_name}():
+    ${context.get("%s_upgrades" % db_name, "pass")}
+
+
+def downgrade_${db_name}():
+    ${context.get("%s_downgrades" % db_name, "pass")}
+
+% endfor
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask/README b/venv/Lib/site-packages/flask_migrate/templates/aioflask/README
new file mode 100644
index 0000000000000000000000000000000000000000..6ed8020e0b66ef283f87e4e64b16222191d2bbea
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/aioflask/README
@@ -0,0 +1 @@
+Single-database configuration for aioflask.
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask/__pycache__/env.cpython-311.pyc b/venv/Lib/site-packages/flask_migrate/templates/aioflask/__pycache__/env.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1abbbe0119c4caa6b6e59b05222a2c6b24a95afa
Binary files /dev/null and b/venv/Lib/site-packages/flask_migrate/templates/aioflask/__pycache__/env.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask/alembic.ini.mako b/venv/Lib/site-packages/flask_migrate/templates/aioflask/alembic.ini.mako
new file mode 100644
index 0000000000000000000000000000000000000000..ec9d45c26a6bb54e833fd4e6ce2de29343894f4b
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/aioflask/alembic.ini.mako
@@ -0,0 +1,50 @@
+# A generic, single database configuration.
+
+[alembic]
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic,flask_migrate
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[logger_flask_migrate]
+level = INFO
+handlers =
+qualname = flask_migrate
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask/env.py b/venv/Lib/site-packages/flask_migrate/templates/aioflask/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a1ece5acba194e155a145846baa6bfec0231327
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/aioflask/env.py
@@ -0,0 +1,118 @@
+import asyncio
+import logging
+from logging.config import fileConfig
+
+from flask import current_app
+
+from alembic import context
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This sets up the loggers.
+fileConfig(config.config_file_name)
+logger = logging.getLogger('alembic.env')
+
+
+def get_engine():
+    try:
+        # this works with Flask-SQLAlchemy<3 and Alchemical
+        return current_app.extensions['migrate'].db.get_engine()
+    except (TypeError, AttributeError):
+        # this works with Flask-SQLAlchemy>=3
+        return current_app.extensions['migrate'].db.engine
+
+
+def get_engine_url():
+    try:
+        return get_engine().url.render_as_string(hide_password=False).replace(
+            '%', '%%')
+    except AttributeError:
+        return str(get_engine().url).replace('%', '%%')
+
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+config.set_main_option('sqlalchemy.url', get_engine_url())
+target_db = current_app.extensions['migrate'].db
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def get_metadata():
+    if hasattr(target_db, 'metadatas'):
+        return target_db.metadatas[None]
+    return target_db.metadata
+
+
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url, target_metadata=get_metadata(), literal_binds=True
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def do_run_migrations(connection):
+    # this callback is used to prevent an auto-migration from being generated
+    # when there are no changes to the schema
+    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
+    def process_revision_directives(context, revision, directives):
+        if getattr(config.cmd_opts, 'autogenerate', False):
+            script = directives[0]
+            if script.upgrade_ops.is_empty():
+                directives[:] = []
+                logger.info('No changes in schema detected.')
+
+    conf_args = current_app.extensions['migrate'].configure_args
+    if conf_args.get("process_revision_directives") is None:
+        conf_args["process_revision_directives"] = process_revision_directives
+
+    context.configure(
+        connection=connection,
+        target_metadata=get_metadata(),
+        **conf_args
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+async def run_migrations_online():
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    connectable = get_engine()
+
+    async with connectable.connect() as connection:
+        await connection.run_sync(do_run_migrations)
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    asyncio.get_event_loop().run_until_complete(run_migrations_online())
diff --git a/venv/Lib/site-packages/flask_migrate/templates/aioflask/script.py.mako b/venv/Lib/site-packages/flask_migrate/templates/aioflask/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..2c0156303a8df3ffdc9de87765bf801bf6bea4a5
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/aioflask/script.py.mako
@@ -0,0 +1,24 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade():
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade():
+    ${downgrades if downgrades else "pass"}
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/README b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/README
new file mode 100644
index 0000000000000000000000000000000000000000..eaae2511a060b2ed968be4c363abb453cbf0215c
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/README
@@ -0,0 +1 @@
+Multi-database configuration for Flask.
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/__pycache__/env.cpython-311.pyc b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/__pycache__/env.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..795571a8f915fe69a5adc3a909050fd0cecc86fd
Binary files /dev/null and b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/__pycache__/env.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/alembic.ini.mako b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/alembic.ini.mako
new file mode 100644
index 0000000000000000000000000000000000000000..ec9d45c26a6bb54e833fd4e6ce2de29343894f4b
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/alembic.ini.mako
@@ -0,0 +1,50 @@
+# A generic, multi-database configuration.
+
+[alembic]
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic,flask_migrate
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[logger_flask_migrate]
+level = INFO
+handlers =
+qualname = flask_migrate
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/env.py b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..31b8e7248bac6fe0933b4104092182d6a8d731a9
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/env.py
@@ -0,0 +1,191 @@
+import logging
+from logging.config import fileConfig
+
+from sqlalchemy import MetaData
+from flask import current_app
+
+from alembic import context
+
+USE_TWOPHASE = False
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This sets up the loggers.
+fileConfig(config.config_file_name)
+logger = logging.getLogger('alembic.env')
+
+
+def get_engine(bind_key=None):
+    try:
+        # this works with Flask-SQLAlchemy<3 and Alchemical
+        return current_app.extensions['migrate'].db.get_engine(bind=bind_key)
+    except (TypeError, AttributeError):
+        # this works with Flask-SQLAlchemy>=3
+        return current_app.extensions['migrate'].db.engines.get(bind_key)
+
+
+def get_engine_url(bind_key=None):
+    try:
+        return get_engine(bind_key).url.render_as_string(
+            hide_password=False).replace('%', '%%')
+    except AttributeError:
+        return str(get_engine(bind_key).url).replace('%', '%%')
+
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+config.set_main_option('sqlalchemy.url', get_engine_url())
+bind_names = []
+if current_app.config.get('SQLALCHEMY_BINDS') is not None:
+    bind_names = list(current_app.config['SQLALCHEMY_BINDS'].keys())
+else:
+    get_bind_names = getattr(current_app.extensions['migrate'].db,
+                             'bind_names', None)
+    if get_bind_names:
+        bind_names = get_bind_names()
+for bind in bind_names:
+    context.config.set_section_option(
+        bind, "sqlalchemy.url", get_engine_url(bind_key=bind))
+target_db = current_app.extensions['migrate'].db
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def get_metadata(bind):
+    """Return the metadata for a bind."""
+    if bind == '':
+        bind = None
+    if hasattr(target_db, 'metadatas'):
+        return target_db.metadatas[bind]
+
+    # legacy, less flexible implementation
+    m = MetaData()
+    for t in target_db.metadata.tables.values():
+        if t.info.get('bind_key') == bind:
+            t.tometadata(m)
+    return m
+
+
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    # for the --sql use case, run migrations for each URL into
+    # individual files.
+
+    engines = {
+        '': {
+            'url': context.config.get_main_option('sqlalchemy.url')
+        }
+    }
+    for name in bind_names:
+        engines[name] = rec = {}
+        rec['url'] = context.config.get_section_option(name, "sqlalchemy.url")
+
+    for name, rec in engines.items():
+        logger.info("Migrating database %s" % (name or '<default>'))
+        file_ = "%s.sql" % name
+        logger.info("Writing output to %s" % file_)
+        with open(file_, 'w') as buffer:
+            context.configure(
+                url=rec['url'],
+                output_buffer=buffer,
+                target_metadata=get_metadata(name),
+                literal_binds=True,
+            )
+            with context.begin_transaction():
+                context.run_migrations(engine_name=name)
+
+
+def run_migrations_online():
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    # this callback is used to prevent an auto-migration from being generated
+    # when there are no changes to the schema
+    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
+    def process_revision_directives(context, revision, directives):
+        if getattr(config.cmd_opts, 'autogenerate', False):
+            script = directives[0]
+            if len(script.upgrade_ops_list) >= len(bind_names) + 1:
+                empty = True
+                for upgrade_ops in script.upgrade_ops_list:
+                    if not upgrade_ops.is_empty():
+                        empty = False
+                if empty:
+                    directives[:] = []
+                    logger.info('No changes in schema detected.')
+
+    conf_args = current_app.extensions['migrate'].configure_args
+    if conf_args.get("process_revision_directives") is None:
+        conf_args["process_revision_directives"] = process_revision_directives
+
+    # for the direct-to-DB use case, start a transaction on all
+    # engines, then run all migrations, then commit all transactions.
+    engines = {
+        '': {'engine': get_engine()}
+    }
+    for name in bind_names:
+        engines[name] = rec = {}
+        rec['engine'] = get_engine(bind_key=name)
+
+    for name, rec in engines.items():
+        engine = rec['engine']
+        rec['connection'] = conn = engine.connect()
+
+        if USE_TWOPHASE:
+            rec['transaction'] = conn.begin_twophase()
+        else:
+            rec['transaction'] = conn.begin()
+
+    try:
+        for name, rec in engines.items():
+            logger.info("Migrating database %s" % (name or '<default>'))
+            context.configure(
+                connection=rec['connection'],
+                upgrade_token="%s_upgrades" % name,
+                downgrade_token="%s_downgrades" % name,
+                target_metadata=get_metadata(name),
+                **conf_args
+            )
+            context.run_migrations(engine_name=name)
+
+        if USE_TWOPHASE:
+            for rec in engines.values():
+                rec['transaction'].prepare()
+
+        for rec in engines.values():
+            rec['transaction'].commit()
+    except:  # noqa: E722
+        for rec in engines.values():
+            rec['transaction'].rollback()
+        raise
+    finally:
+        for rec in engines.values():
+            rec['connection'].close()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/script.py.mako b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..3beabc46397a862b61217f3ae5fcb6397a6ca72f
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/flask-multidb/script.py.mako
@@ -0,0 +1,53 @@
+<%!
+import re
+
+%>"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade(engine_name):
+    globals()["upgrade_%s" % engine_name]()
+
+
+def downgrade(engine_name):
+    globals()["downgrade_%s" % engine_name]()
+
+<%
+    from flask import current_app
+    bind_names = []
+    if current_app.config.get('SQLALCHEMY_BINDS') is not None:
+        bind_names = list(current_app.config['SQLALCHEMY_BINDS'].keys())
+    else:
+        get_bind_names = getattr(current_app.extensions['migrate'].db, 'bind_names', None)
+        if get_bind_names:
+            bind_names = get_bind_names()
+    db_names = [''] + bind_names
+%>
+
+## generate an "upgrade_<xyz>() / downgrade_<xyz>()" function
+## for each database name in the ini file.
+
+% for db_name in db_names:
+
+def upgrade_${db_name}():
+    ${context.get("%s_upgrades" % db_name, "pass")}
+
+
+def downgrade_${db_name}():
+    ${context.get("%s_downgrades" % db_name, "pass")}
+
+% endfor
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask/README b/venv/Lib/site-packages/flask_migrate/templates/flask/README
new file mode 100644
index 0000000000000000000000000000000000000000..0e048441597444a7e2850d6d7c4ce15550f79bda
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/flask/README
@@ -0,0 +1 @@
+Single-database configuration for Flask.
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask/__pycache__/env.cpython-311.pyc b/venv/Lib/site-packages/flask_migrate/templates/flask/__pycache__/env.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab1e4351c78e927a57c20bea7eb39c91dba07042
Binary files /dev/null and b/venv/Lib/site-packages/flask_migrate/templates/flask/__pycache__/env.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask/alembic.ini.mako b/venv/Lib/site-packages/flask_migrate/templates/flask/alembic.ini.mako
new file mode 100644
index 0000000000000000000000000000000000000000..ec9d45c26a6bb54e833fd4e6ce2de29343894f4b
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/flask/alembic.ini.mako
@@ -0,0 +1,50 @@
+# A generic, single database configuration.
+
+[alembic]
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic,flask_migrate
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[logger_flask_migrate]
+level = INFO
+handlers =
+qualname = flask_migrate
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask/env.py b/venv/Lib/site-packages/flask_migrate/templates/flask/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c9709271b2ff28271b13c29bba5c50b80fea0ac
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/flask/env.py
@@ -0,0 +1,113 @@
+import logging
+from logging.config import fileConfig
+
+from flask import current_app
+
+from alembic import context
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This sets up the loggers defined in the .ini file.
+fileConfig(config.config_file_name)
+logger = logging.getLogger('alembic.env')
+
+
+def get_engine():
+    try:
+        # this works with Flask-SQLAlchemy<3 and Alchemical
+        return current_app.extensions['migrate'].db.get_engine()
+    except (TypeError, AttributeError):
+        # this works with Flask-SQLAlchemy>=3
+        return current_app.extensions['migrate'].db.engine
+
+
+def get_engine_url():
+    try:
+        return get_engine().url.render_as_string(hide_password=False).replace(
+            '%', '%%')
+    except AttributeError:
+        return str(get_engine().url).replace('%', '%%')
+
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+config.set_main_option('sqlalchemy.url', get_engine_url())
+target_db = current_app.extensions['migrate'].db
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def get_metadata():
+    if hasattr(target_db, 'metadatas'):
+        return target_db.metadatas[None]
+    return target_db.metadata
+
+
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well.  By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url, target_metadata=get_metadata(), literal_binds=True
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online():
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+
+    # this callback is used to prevent an auto-migration from being generated
+    # when there are no changes to the schema
+    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
+    def process_revision_directives(context, revision, directives):
+        if getattr(config.cmd_opts, 'autogenerate', False):
+            script = directives[0]
+            if script.upgrade_ops.is_empty():
+                directives[:] = []
+                logger.info('No changes in schema detected.')
+
+    conf_args = current_app.extensions['migrate'].configure_args
+    if conf_args.get("process_revision_directives") is None:
+        conf_args["process_revision_directives"] = process_revision_directives
+
+    connectable = get_engine()
+
+    with connectable.connect() as connection:
+        context.configure(
+            connection=connection,
+            target_metadata=get_metadata(),
+            **conf_args
+        )
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/venv/Lib/site-packages/flask_migrate/templates/flask/script.py.mako b/venv/Lib/site-packages/flask_migrate/templates/flask/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..2c0156303a8df3ffdc9de87765bf801bf6bea4a5
--- /dev/null
+++ b/venv/Lib/site-packages/flask_migrate/templates/flask/script.py.mako
@@ -0,0 +1,24 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade():
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade():
+    ${downgrades if downgrades else "pass"}
diff --git a/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/INSTALLER b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/METADATA b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..92f1ff25658838ae9639052003770899dc3851cb
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/METADATA
@@ -0,0 +1,72 @@
+Metadata-Version: 2.1
+Name: Flask-WTF
+Version: 1.2.1
+Summary: Form rendering, validation, and CSRF protection for Flask with WTForms.
+Project-URL: Documentation, https://flask-wtf.readthedocs.io/
+Project-URL: Changes, https://flask-wtf.readthedocs.io/changes/
+Project-URL: Source Code, https://github.com/wtforms/flask-wtf/
+Project-URL: Issue Tracker, https://github.com/wtforms/flask-wtf/issues/
+Project-URL: Chat, https://discord.gg/pallets
+Maintainer: WTForms
+License: Copyright 2010 WTForms
+        
+        Redistribution and use in source and binary forms, with or without
+        modification, are permitted provided that the following conditions are
+        met:
+        
+        1.  Redistributions of source code must retain the above copyright
+            notice, this list of conditions and the following disclaimer.
+        
+        2.  Redistributions in binary form must reproduce the above copyright
+            notice, this list of conditions and the following disclaimer in the
+            documentation and/or other materials provided with the distribution.
+        
+        3.  Neither the name of the copyright holder nor the names of its
+            contributors may be used to endorse or promote products derived from
+            this software without specific prior written permission.
+        
+        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+        "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+        LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+        PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+        HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+        SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+        TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+        PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+        LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+        NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+        SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+License-File: LICENSE.rst
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Internet :: WWW/HTTP :: WSGI
+Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application
+Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
+Requires-Python: >=3.8
+Requires-Dist: flask
+Requires-Dist: itsdangerous
+Requires-Dist: wtforms
+Provides-Extra: email
+Requires-Dist: email-validator; extra == 'email'
+Description-Content-Type: text/x-rst
+
+Flask-WTF
+=========
+
+Simple integration of Flask and WTForms, including CSRF, file upload,
+and reCAPTCHA.
+
+Links
+-----
+
+-   Documentation: https://flask-wtf.readthedocs.io/
+-   Changes: https://flask-wtf.readthedocs.io/changes/
+-   PyPI Releases: https://pypi.org/project/Flask-WTF/
+-   Source Code: https://github.com/wtforms/flask-wtf/
+-   Issue Tracker: https://github.com/wtforms/flask-wtf/issues/
+-   Chat: https://discord.gg/pallets
diff --git a/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/RECORD b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..a7814665f525f142935e652b0eafe2213194eccf
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/RECORD
@@ -0,0 +1,26 @@
+flask_wtf-1.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+flask_wtf-1.2.1.dist-info/METADATA,sha256=9Y5upDJ7WU2m2l4erWImF3HcVSWIZKH3TdX6klYpq4M,3373
+flask_wtf-1.2.1.dist-info/RECORD,,
+flask_wtf-1.2.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+flask_wtf-1.2.1.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87
+flask_wtf-1.2.1.dist-info/licenses/LICENSE.rst,sha256=1fGQNkUVeMs27u8EyZ6_fXyi5w3PBDY2UZvEIOFafGI,1475
+flask_wtf/__init__.py,sha256=x6ydw5SJzsXZgz-Y6IM_95Sy8VufRepvZH1DUIlFoTo,214
+flask_wtf/__pycache__/__init__.cpython-311.pyc,,
+flask_wtf/__pycache__/_compat.cpython-311.pyc,,
+flask_wtf/__pycache__/csrf.cpython-311.pyc,,
+flask_wtf/__pycache__/file.cpython-311.pyc,,
+flask_wtf/__pycache__/form.cpython-311.pyc,,
+flask_wtf/__pycache__/i18n.cpython-311.pyc,,
+flask_wtf/_compat.py,sha256=N3sqC9yzFWY-3MZ7QazX1sidvkO3d5yy4NR6lkp0s94,248
+flask_wtf/csrf.py,sha256=O-fjnWygxxi_FsIU2koua97ZpIhiOJVDHA57dXLpvTA,10171
+flask_wtf/file.py,sha256=AsfkYTCgtqGWySimc_NjeAxg-DtpdcthhqMLrXIDAhU,4706
+flask_wtf/form.py,sha256=TmR7xCrxin2LHp6thn7fq1OeU8aLB7xsZzvv52nH7Ss,4049
+flask_wtf/i18n.py,sha256=TyO8gqt9DocHMSaNhj0KKgxoUrPYs-G1nVW-jns0SOw,1166
+flask_wtf/recaptcha/__init__.py,sha256=m4eNGoU3Q0Wnt_wP8VvOlA0mwWuoMtAcK9pYT7sPFp8,106
+flask_wtf/recaptcha/__pycache__/__init__.cpython-311.pyc,,
+flask_wtf/recaptcha/__pycache__/fields.cpython-311.pyc,,
+flask_wtf/recaptcha/__pycache__/validators.cpython-311.pyc,,
+flask_wtf/recaptcha/__pycache__/widgets.cpython-311.pyc,,
+flask_wtf/recaptcha/fields.py,sha256=M1-RFuUKOsJAzsLm3xaaxuhX2bB9oRqS-HVSN-NpkmI,433
+flask_wtf/recaptcha/validators.py,sha256=3sd1mUQT3Y3D_WJeKwecxUGstnhh_QD-A_dEBJfkf6s,2434
+flask_wtf/recaptcha/widgets.py,sha256=J_XyxAZt3uB15diIMnkXXGII2dmsWCsVsKV3KQYn4Ns,1512
diff --git a/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/REQUESTED b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/WHEEL b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..ba1a8af28bcccdacebb8c22dfda1537447a1a58a
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.18.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/licenses/LICENSE.rst b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/licenses/LICENSE.rst
new file mode 100644
index 0000000000000000000000000000000000000000..63c3617a2d7164d30cae358c23eb3f75b5a758a1
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf-1.2.1.dist-info/licenses/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2010 WTForms
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1.  Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+2.  Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+3.  Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/venv/Lib/site-packages/flask_wtf/__init__.py b/venv/Lib/site-packages/flask_wtf/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..be2649e26d8dfa2cde5457f13b72715135d12b5a
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/__init__.py
@@ -0,0 +1,8 @@
+from .csrf import CSRFProtect
+from .form import FlaskForm
+from .form import Form
+from .recaptcha import Recaptcha
+from .recaptcha import RecaptchaField
+from .recaptcha import RecaptchaWidget
+
+__version__ = "1.2.1"
diff --git a/venv/Lib/site-packages/flask_wtf/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..304531c8dd034ce8289de1e379acd11a66bf38cd
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/__pycache__/_compat.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/__pycache__/_compat.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..beb00be94d82d4c7316bfcf1139bf843f1194cc5
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/__pycache__/_compat.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/__pycache__/csrf.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/__pycache__/csrf.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a6e9a34361fdea3a9249ed18fe09ac5474f45b6
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/__pycache__/csrf.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/__pycache__/file.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/__pycache__/file.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bac198e45ca285c998a6e60eec1a57deacb716eb
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/__pycache__/file.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/__pycache__/form.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/__pycache__/form.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..156609e14de7e83103f2a2c776a0a668e0e6b194
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/__pycache__/form.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/__pycache__/i18n.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/__pycache__/i18n.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04d2d7e2cbe3df7f61e7c03d89b95925494f4bec
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/__pycache__/i18n.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/_compat.py b/venv/Lib/site-packages/flask_wtf/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..50973e063bbdbd6982fc9501221603efbc2e88f9
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/_compat.py
@@ -0,0 +1,11 @@
+import warnings
+
+
+class FlaskWTFDeprecationWarning(DeprecationWarning):
+    pass
+
+
+warnings.simplefilter("always", FlaskWTFDeprecationWarning)
+warnings.filterwarnings(
+    "ignore", category=FlaskWTFDeprecationWarning, module="wtforms|flask_wtf"
+)
diff --git a/venv/Lib/site-packages/flask_wtf/csrf.py b/venv/Lib/site-packages/flask_wtf/csrf.py
new file mode 100644
index 0000000000000000000000000000000000000000..06afa0cd4ef3670ca3357d47bbecc2baa7e18fe0
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/csrf.py
@@ -0,0 +1,329 @@
+import hashlib
+import hmac
+import logging
+import os
+from urllib.parse import urlparse
+
+from flask import Blueprint
+from flask import current_app
+from flask import g
+from flask import request
+from flask import session
+from itsdangerous import BadData
+from itsdangerous import SignatureExpired
+from itsdangerous import URLSafeTimedSerializer
+from werkzeug.exceptions import BadRequest
+from wtforms import ValidationError
+from wtforms.csrf.core import CSRF
+
+__all__ = ("generate_csrf", "validate_csrf", "CSRFProtect")
+logger = logging.getLogger(__name__)
+
+
+def generate_csrf(secret_key=None, token_key=None):
+    """Generate a CSRF token. The token is cached for a request, so multiple
+    calls to this function will generate the same token.
+
+    During testing, it might be useful to access the signed token in
+    ``g.csrf_token`` and the raw token in ``session['csrf_token']``.
+
+    :param secret_key: Used to securely sign the token. Default is
+        ``WTF_CSRF_SECRET_KEY`` or ``SECRET_KEY``.
+    :param token_key: Key where token is stored in session for comparison.
+        Default is ``WTF_CSRF_FIELD_NAME`` or ``'csrf_token'``.
+    """
+
+    secret_key = _get_config(
+        secret_key,
+        "WTF_CSRF_SECRET_KEY",
+        current_app.secret_key,
+        message="A secret key is required to use CSRF.",
+    )
+    field_name = _get_config(
+        token_key,
+        "WTF_CSRF_FIELD_NAME",
+        "csrf_token",
+        message="A field name is required to use CSRF.",
+    )
+
+    if field_name not in g:
+        s = URLSafeTimedSerializer(secret_key, salt="wtf-csrf-token")
+
+        if field_name not in session:
+            session[field_name] = hashlib.sha1(os.urandom(64)).hexdigest()
+
+        try:
+            token = s.dumps(session[field_name])
+        except TypeError:
+            session[field_name] = hashlib.sha1(os.urandom(64)).hexdigest()
+            token = s.dumps(session[field_name])
+
+        setattr(g, field_name, token)
+
+    return g.get(field_name)
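+
+    # Usage sketch (hypothetical endpoint): the signed token can be handed to
+    # JavaScript clients from a view, assuming SECRET_KEY is configured:
+    #
+    #   @app.route("/csrf-token")
+    #   def csrf_token():
+    #       return {"csrf_token": generate_csrf()}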
+
+
+def validate_csrf(data, secret_key=None, time_limit=None, token_key=None):
+    """Check if the given data is a valid CSRF token. This compares the given
+    signed token to the one stored in the session.
+
+    :param data: The signed CSRF token to be checked.
+    :param secret_key: Used to securely sign the token. Default is
+        ``WTF_CSRF_SECRET_KEY`` or ``SECRET_KEY``.
+    :param time_limit: Number of seconds that the token is valid. Default is
+        ``WTF_CSRF_TIME_LIMIT`` or 3600 seconds (60 minutes).
+    :param token_key: Key where token is stored in session for comparison.
+        Default is ``WTF_CSRF_FIELD_NAME`` or ``'csrf_token'``.
+
+    :raises ValidationError: Contains the reason that validation failed.
+
+    .. versionchanged:: 0.14
+        Raises ``ValidationError`` with a specific error message rather than
+        returning ``True`` or ``False``.
+    """
+
+    secret_key = _get_config(
+        secret_key,
+        "WTF_CSRF_SECRET_KEY",
+        current_app.secret_key,
+        message="A secret key is required to use CSRF.",
+    )
+    field_name = _get_config(
+        token_key,
+        "WTF_CSRF_FIELD_NAME",
+        "csrf_token",
+        message="A field name is required to use CSRF.",
+    )
+    time_limit = _get_config(time_limit, "WTF_CSRF_TIME_LIMIT", 3600, required=False)
+
+    if not data:
+        raise ValidationError("The CSRF token is missing.")
+
+    if field_name not in session:
+        raise ValidationError("The CSRF session token is missing.")
+
+    s = URLSafeTimedSerializer(secret_key, salt="wtf-csrf-token")
+
+    try:
+        token = s.loads(data, max_age=time_limit)
+    except SignatureExpired as e:
+        raise ValidationError("The CSRF token has expired.") from e
+    except BadData as e:
+        raise ValidationError("The CSRF token is invalid.") from e
+
+    if not hmac.compare_digest(session[field_name], token):
+        raise ValidationError("The CSRF tokens do not match.")
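+
+    # Usage sketch: validation raises instead of returning a bool, so callers
+    # typically wrap it (request and ValidationError are imported above;
+    # abort would come from flask):
+    #
+    #   try:
+    #       validate_csrf(request.headers.get("X-CSRFToken"))
+    #   except ValidationError as e:
+    #       abort(400, e.args[0])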
+
+
+def _get_config(
+    value, config_name, default=None, required=True, message="CSRF is not configured."
+):
+    """Find config value based on provided value, Flask config, and default
+    value.
+
+    :param value: already provided config value
+    :param config_name: Flask ``config`` key
+    :param default: default value if not provided or configured
+    :param required: whether the value must not be ``None``
+    :param message: error message if required config is not found
+    :raises KeyError: if required config is not found
+    """
+
+    if value is None:
+        value = current_app.config.get(config_name, default)
+
+    if required and value is None:
+        raise RuntimeError(message)
+
+    return value
+
+
+class _FlaskFormCSRF(CSRF):
+    def setup_form(self, form):
+        self.meta = form.meta
+        return super().setup_form(form)
+
+    def generate_csrf_token(self, csrf_token_field):
+        return generate_csrf(
+            secret_key=self.meta.csrf_secret, token_key=self.meta.csrf_field_name
+        )
+
+    def validate_csrf_token(self, form, field):
+        if g.get("csrf_valid", False):
+            # already validated by CSRFProtect
+            return
+
+        try:
+            validate_csrf(
+                field.data,
+                self.meta.csrf_secret,
+                self.meta.csrf_time_limit,
+                self.meta.csrf_field_name,
+            )
+        except ValidationError as e:
+            logger.info(e.args[0])
+            raise
+
+
+class CSRFProtect:
+    """Enable CSRF protection globally for a Flask app.
+
+    ::
+
+        app = Flask(__name__)
+        csrf = CSRFProtect(app)
+
+    Checks the ``csrf_token`` field sent with forms, or the ``X-CSRFToken``
+    header sent with JavaScript requests. Render the token in templates using
+    ``{{ csrf_token() }}``.
+
+    See the :ref:`csrf` documentation.
+    """
+
+    def __init__(self, app=None):
+        self._exempt_views = set()
+        self._exempt_blueprints = set()
+
+        if app:
+            self.init_app(app)
+
+    def init_app(self, app):
+        app.extensions["csrf"] = self
+
+        app.config.setdefault("WTF_CSRF_ENABLED", True)
+        app.config.setdefault("WTF_CSRF_CHECK_DEFAULT", True)
+        app.config["WTF_CSRF_METHODS"] = set(
+            app.config.get("WTF_CSRF_METHODS", ["POST", "PUT", "PATCH", "DELETE"])
+        )
+        app.config.setdefault("WTF_CSRF_FIELD_NAME", "csrf_token")
+        app.config.setdefault("WTF_CSRF_HEADERS", ["X-CSRFToken", "X-CSRF-Token"])
+        app.config.setdefault("WTF_CSRF_TIME_LIMIT", 3600)
+        app.config.setdefault("WTF_CSRF_SSL_STRICT", True)
+
+        app.jinja_env.globals["csrf_token"] = generate_csrf
+        app.context_processor(lambda: {"csrf_token": generate_csrf})
+
+        @app.before_request
+        def csrf_protect():
+            if not app.config["WTF_CSRF_ENABLED"]:
+                return
+
+            if not app.config["WTF_CSRF_CHECK_DEFAULT"]:
+                return
+
+            if request.method not in app.config["WTF_CSRF_METHODS"]:
+                return
+
+            if not request.endpoint:
+                return
+
+            if app.blueprints.get(request.blueprint) in self._exempt_blueprints:
+                return
+
+            view = app.view_functions.get(request.endpoint)
+            dest = f"{view.__module__}.{view.__name__}"
+
+            if dest in self._exempt_views:
+                return
+
+            self.protect()
+
+    def _get_csrf_token(self):
+        # find the token in the form data
+        field_name = current_app.config["WTF_CSRF_FIELD_NAME"]
+        base_token = request.form.get(field_name)
+
+        if base_token:
+            return base_token
+
+        # if the form has a prefix, the name will be {prefix}-csrf_token
+        for key in request.form:
+            if key.endswith(field_name):
+                csrf_token = request.form[key]
+
+                if csrf_token:
+                    return csrf_token
+
+        # find the token in the headers
+        for header_name in current_app.config["WTF_CSRF_HEADERS"]:
+            csrf_token = request.headers.get(header_name)
+
+            if csrf_token:
+                return csrf_token
+
+        return None
+
+    def protect(self):
+        if request.method not in current_app.config["WTF_CSRF_METHODS"]:
+            return
+
+        try:
+            validate_csrf(self._get_csrf_token())
+        except ValidationError as e:
+            logger.info(e.args[0])
+            self._error_response(e.args[0])
+
+        if request.is_secure and current_app.config["WTF_CSRF_SSL_STRICT"]:
+            if not request.referrer:
+                self._error_response("The referrer header is missing.")
+
+            good_referrer = f"https://{request.host}/"
+
+            if not same_origin(request.referrer, good_referrer):
+                self._error_response("The referrer does not match the host.")
+
+        g.csrf_valid = True  # mark this request as CSRF valid
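+
+    # Client-side sketch: templates can expose the token via
+    #   <meta name="csrf-token" content="{{ csrf_token() }}">
+    # and JavaScript can send it back in one of the WTF_CSRF_HEADERS
+    # (X-CSRFToken by default) so _get_csrf_token() finds it.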
+
+    def exempt(self, view):
+        """Mark a view or blueprint to be excluded from CSRF protection.
+
+        ::
+
+            @app.route('/some-view', methods=['POST'])
+            @csrf.exempt
+            def some_view():
+                ...
+
+        ::
+
+            bp = Blueprint(...)
+            csrf.exempt(bp)
+
+        """
+
+        if isinstance(view, Blueprint):
+            self._exempt_blueprints.add(view)
+            return view
+
+        if isinstance(view, str):
+            view_location = view
+        else:
+            view_location = ".".join((view.__module__, view.__name__))
+
+        self._exempt_views.add(view_location)
+        return view
+
+    def _error_response(self, reason):
+        raise CSRFError(reason)
+
+
+class CSRFError(BadRequest):
+    """Raise if the client sends invalid CSRF data with the request.
+
+    Generates a 400 Bad Request response with the failure reason by default.
+    Customize the response by registering a handler with
+    :meth:`flask.Flask.errorhandler`.
+    """
+
+    description = "CSRF validation failed."
+
+
+def same_origin(current_uri, compare_uri):
+    current = urlparse(current_uri)
+    compare = urlparse(compare_uri)
+
+    return (
+        current.scheme == compare.scheme
+        and current.hostname == compare.hostname
+        and current.port == compare.port
+    )
diff --git a/venv/Lib/site-packages/flask_wtf/file.py b/venv/Lib/site-packages/flask_wtf/file.py
new file mode 100644
index 0000000000000000000000000000000000000000..a720dff8d81911df179e80512caa0056a47be410
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/file.py
@@ -0,0 +1,147 @@
+from collections import abc
+
+from werkzeug.datastructures import FileStorage
+from wtforms import FileField as _FileField
+from wtforms import MultipleFileField as _MultipleFileField
+from wtforms.validators import DataRequired
+from wtforms.validators import StopValidation
+from wtforms.validators import ValidationError
+
+
+class FileField(_FileField):
+    """Werkzeug-aware subclass of :class:`wtforms.fields.FileField`."""
+
+    def process_formdata(self, valuelist):
+        valuelist = (x for x in valuelist if isinstance(x, FileStorage) and x)
+        data = next(valuelist, None)
+
+        if data is not None:
+            self.data = data
+        else:
+            self.raw_data = ()
+
+
+class MultipleFileField(_MultipleFileField):
+    """Werkzeug-aware subclass of :class:`wtforms.fields.MultipleFileField`.
+
+    .. versionadded:: 1.2.0
+    """
+
+    def process_formdata(self, valuelist):
+        valuelist = (x for x in valuelist if isinstance(x, FileStorage) and x)
+        data = list(valuelist) or None
+
+        if data is not None:
+            self.data = data
+        else:
+            self.raw_data = ()
+
+
+class FileRequired(DataRequired):
+    """Validates that the uploaded files(s) is a Werkzeug
+    :class:`~werkzeug.datastructures.FileStorage` object.
+
+    :param message: error message
+
+    You can also use the synonym ``file_required``.
+    """
+
+    def __call__(self, form, field):
+        field_data = [field.data] if not isinstance(field.data, list) else field.data
+        if not (
+            all(isinstance(x, FileStorage) and x for x in field_data) and field_data
+        ):
+            raise StopValidation(
+                self.message or field.gettext("This field is required.")
+            )
+
+
+file_required = FileRequired
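+
+# Usage sketch (hypothetical form; FlaskForm would come from flask_wtf,
+# FileField is the class defined above):
+#
+#   class UploadForm(FlaskForm):
+#       document = FileField("Document", validators=[FileRequired()])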
+
+
+class FileAllowed:
+    """Validates that the uploaded file(s) is allowed by a given list of
+    extensions or a Flask-Uploads :class:`~flaskext.uploads.UploadSet`.
+
+    :param upload_set: A list of extensions or an
+        :class:`~flaskext.uploads.UploadSet`
+    :param message: error message
+
+    You can also use the synonym ``file_allowed``.
+    """
+
+    def __init__(self, upload_set, message=None):
+        self.upload_set = upload_set
+        self.message = message
+
+    def __call__(self, form, field):
+        field_data = [field.data] if not isinstance(field.data, list) else field.data
+        if not (
+            all(isinstance(x, FileStorage) and x for x in field_data) and field_data
+        ):
+            return
+
+        filenames = [f.filename.lower() for f in field_data]
+
+        for filename in filenames:
+            if isinstance(self.upload_set, abc.Iterable):
+                if any(filename.endswith("." + x) for x in self.upload_set):
+                    continue
+
+                raise StopValidation(
+                    self.message
+                    or field.gettext(
+                        "File does not have an approved extension: {extensions}"
+                    ).format(extensions=", ".join(self.upload_set))
+                )
+
+            if not self.upload_set.file_allowed(field_data, filename):
+                raise StopValidation(
+                    self.message
+                    or field.gettext("File does not have an approved extension.")
+                )
+
+
+file_allowed = FileAllowed
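+
+# Usage sketch (hypothetical field): restrict uploads by extension, e.g.
+#
+#   photo = FileField("Photo", validators=[
+#       FileRequired(),
+#       FileAllowed(["jpg", "png"], "Images only!"),
+#   ])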
+
+
+class FileSize:
+    """Validates that the uploaded file(s) is within a minimum and maximum
+    file size (set in bytes).
+
+    :param min_size: minimum allowed file size (in bytes). Defaults to 0 bytes.
+    :param max_size: maximum allowed file size (in bytes).
+    :param message: error message
+
+    You can also use the synonym ``file_size``.
+    """
+
+    def __init__(self, max_size, min_size=0, message=None):
+        self.min_size = min_size
+        self.max_size = max_size
+        self.message = message
+
+    def __call__(self, form, field):
+        field_data = [field.data] if not isinstance(field.data, list) else field.data
+        if not (
+            all(isinstance(x, FileStorage) and x for x in field_data) and field_data
+        ):
+            return
+
+        for f in field_data:
+            file_size = len(f.read())
+            f.seek(0)  # reset cursor position to beginning of file
+
+            if (file_size < self.min_size) or (file_size > self.max_size):
+                # the file is too small or too big => validation failure
+                raise ValidationError(
+                    self.message
+                    or field.gettext(
+                        "File must be between {min_size} and {max_size} bytes.".format(
+                            min_size=self.min_size, max_size=self.max_size
+                        )
+                    )
+                )
+
+
+file_size = FileSize
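+
+# Usage sketch: cap uploads at roughly 2 MiB (sizes are in bytes):
+#
+#   attachment = FileField("Attachment", validators=[
+#       FileSize(max_size=2 * 1024 * 1024, message="File is too large."),
+#   ])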
diff --git a/venv/Lib/site-packages/flask_wtf/form.py b/venv/Lib/site-packages/flask_wtf/form.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7f52e022c82fe43d6674377e5df040c82d10d79
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/form.py
@@ -0,0 +1,127 @@
+from flask import current_app
+from flask import request
+from flask import session
+from markupsafe import Markup
+from werkzeug.datastructures import CombinedMultiDict
+from werkzeug.datastructures import ImmutableMultiDict
+from werkzeug.utils import cached_property
+from wtforms import Form
+from wtforms.meta import DefaultMeta
+from wtforms.widgets import HiddenInput
+
+from .csrf import _FlaskFormCSRF
+
+try:
+    from .i18n import translations
+except ImportError:
+    translations = None  # babel not installed
+
+
+SUBMIT_METHODS = {"POST", "PUT", "PATCH", "DELETE"}
+_Auto = object()
+
+
+class FlaskForm(Form):
+    """Flask-specific subclass of WTForms :class:`~wtforms.form.Form`.
+
+    If ``formdata`` is not specified, this will use :attr:`flask.request.form`
+    and :attr:`flask.request.files`.  Explicitly pass ``formdata=None`` to
+    prevent this.
+    """
+
+    class Meta(DefaultMeta):
+        csrf_class = _FlaskFormCSRF
+        csrf_context = session  # not used, provided for custom csrf_class
+
+        @cached_property
+        def csrf(self):
+            return current_app.config.get("WTF_CSRF_ENABLED", True)
+
+        @cached_property
+        def csrf_secret(self):
+            return current_app.config.get("WTF_CSRF_SECRET_KEY", current_app.secret_key)
+
+        @cached_property
+        def csrf_field_name(self):
+            return current_app.config.get("WTF_CSRF_FIELD_NAME", "csrf_token")
+
+        @cached_property
+        def csrf_time_limit(self):
+            return current_app.config.get("WTF_CSRF_TIME_LIMIT", 3600)
+
+        def wrap_formdata(self, form, formdata):
+            if formdata is _Auto:
+                if _is_submitted():
+                    if request.files:
+                        return CombinedMultiDict((request.files, request.form))
+                    elif request.form:
+                        return request.form
+                    elif request.is_json:
+                        return ImmutableMultiDict(request.get_json())
+
+                return None
+
+            return formdata
+
+        def get_translations(self, form):
+            if not current_app.config.get("WTF_I18N_ENABLED", True):
+                return super().get_translations(form)
+
+            return translations
+
+    def __init__(self, formdata=_Auto, **kwargs):
+        super().__init__(formdata=formdata, **kwargs)
+
+    def is_submitted(self):
+        """Consider the form submitted if there is an active request and
+        the method is ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
+        """
+
+        return _is_submitted()
+
+    def validate_on_submit(self, extra_validators=None):
+        """Call :meth:`validate` only if the form is submitted.
+        This is a shortcut for ``form.is_submitted() and form.validate()``.
+        """
+        return self.is_submitted() and self.validate(extra_validators=extra_validators)
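+
+    # View sketch (hypothetical route and form names):
+    #
+    #   @app.route("/add", methods=["GET", "POST"])
+    #   def add():
+    #       form = MyForm()
+    #       if form.validate_on_submit():
+    #           ...  # process form.data, then redirect
+    #       return render_template("add.html", form=form)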
+
+    def hidden_tag(self, *fields):
+        """Render the form's hidden fields in one call.
+
+        A field is considered hidden if it uses the
+        :class:`~wtforms.widgets.HiddenInput` widget.
+
+        If ``fields`` are given, only render the given fields that
+        are hidden.  If a string is passed, render the field with that
+        name if it exists.
+
+        .. versionchanged:: 0.13
+
+           No longer wraps inputs in hidden div.
+           This is valid HTML 5.
+
+        .. versionchanged:: 0.13
+
+           Skip passed fields that aren't hidden.
+           Skip passed names that don't exist.
+        """
+
+        def hidden_fields(fields):
+            for f in fields:
+                if isinstance(f, str):
+                    f = getattr(self, f, None)
+
+                if f is None or not isinstance(f.widget, HiddenInput):
+                    continue
+
+                yield f
+
+        return Markup("\n".join(str(f) for f in hidden_fields(fields or self)))
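+
+    # Template sketch: a Jinja form typically renders all hidden fields
+    # (including the CSRF token) in one call:
+    #
+    #   <form method="post">
+    #       {{ form.hidden_tag() }}
+    #       ...
+    #   </form>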
+
+
+def _is_submitted():
+    """Consider the form submitted if there is an active request and
+    the method is ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
+    """
+
+    return bool(request) and request.method in SUBMIT_METHODS
diff --git a/venv/Lib/site-packages/flask_wtf/i18n.py b/venv/Lib/site-packages/flask_wtf/i18n.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cc0e9c5a6d2fee8d18c4f46a79ed82f93d132a7
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/i18n.py
@@ -0,0 +1,47 @@
+from babel import support
+from flask import current_app
+from flask import request
+from flask_babel import get_locale
+from wtforms.i18n import messages_path
+
+__all__ = ("Translations", "translations")
+
+
+def _get_translations():
+    """Returns the correct gettext translations.
+    Copy from flask-babel with some modifications.
+    """
+
+    if not request:
+        return None
+
+    # babel should be in extensions for get_locale
+    if "babel" not in current_app.extensions:
+        return None
+
+    translations = getattr(request, "wtforms_translations", None)
+
+    if translations is None:
+        translations = support.Translations.load(
+            messages_path(), [get_locale()], domain="wtforms"
+        )
+        request.wtforms_translations = translations
+
+    return translations
+
+
+class Translations:
+    def gettext(self, string):
+        t = _get_translations()
+        return string if t is None else t.ugettext(string)
+
+    def ngettext(self, singular, plural, n):
+        t = _get_translations()
+
+        if t is None:
+            return singular if n == 1 else plural
+
+        return t.ungettext(singular, plural, n)
+
+
+translations = Translations()
diff --git a/venv/Lib/site-packages/flask_wtf/recaptcha/__init__.py b/venv/Lib/site-packages/flask_wtf/recaptcha/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3100d37e3389219d98787b585357edbe0d9bcc37
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/recaptcha/__init__.py
@@ -0,0 +1,3 @@
+from .fields import RecaptchaField
+from .validators import Recaptcha
+from .widgets import RecaptchaWidget
diff --git a/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad5312065a905cf14e41f922291566616856c8fb
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/fields.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/fields.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1eb5b704de329b4e160ea7d0eefa8dd48b16104d
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/fields.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/validators.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/validators.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8a1ffd5e1f5b5180ec496e94c61d8d59e9302f6
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/validators.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/widgets.cpython-311.pyc b/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/widgets.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f5bb03a28b7e7b55f3ef3a93a63eda13b3d3754
Binary files /dev/null and b/venv/Lib/site-packages/flask_wtf/recaptcha/__pycache__/widgets.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/flask_wtf/recaptcha/fields.py b/venv/Lib/site-packages/flask_wtf/recaptcha/fields.py
new file mode 100644
index 0000000000000000000000000000000000000000..e91fd092f98c01932a90ffafe68bbf98390ff2ed
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/recaptcha/fields.py
@@ -0,0 +1,17 @@
+from wtforms.fields import Field
+
+from . import widgets
+from .validators import Recaptcha
+
+__all__ = ["RecaptchaField"]
+
+
+class RecaptchaField(Field):
+    widget = widgets.RecaptchaWidget()
+
+    # error message if recaptcha validation fails
+    recaptcha_error = None
+
+    def __init__(self, label="", validators=None, **kwargs):
+        validators = validators or [Recaptcha()]
+        super().__init__(label, validators, **kwargs)
diff --git a/venv/Lib/site-packages/flask_wtf/recaptcha/validators.py b/venv/Lib/site-packages/flask_wtf/recaptcha/validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5cafb3478cd644a199fb731cf5d2c0c440f986c
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/recaptcha/validators.py
@@ -0,0 +1,75 @@
+import json
+from urllib import request as http
+from urllib.parse import urlencode
+
+from flask import current_app
+from flask import request
+from wtforms import ValidationError
+
+RECAPTCHA_VERIFY_SERVER_DEFAULT = "https://www.google.com/recaptcha/api/siteverify"
+RECAPTCHA_ERROR_CODES = {
+    "missing-input-secret": "The secret parameter is missing.",
+    "invalid-input-secret": "The secret parameter is invalid or malformed.",
+    "missing-input-response": "The response parameter is missing.",
+    "invalid-input-response": "The response parameter is invalid or malformed.",
+}
+
+
+__all__ = ["Recaptcha"]
+
+
+class Recaptcha:
+    """Validates a ReCaptcha."""
+
+    def __init__(self, message=None):
+        if message is None:
+            message = RECAPTCHA_ERROR_CODES["missing-input-response"]
+        self.message = message
+
+    def __call__(self, form, field):
+        if current_app.testing:
+            return True
+
+        if request.is_json:
+            response = request.json.get("g-recaptcha-response", "")
+        else:
+            response = request.form.get("g-recaptcha-response", "")
+        remote_ip = request.remote_addr
+
+        if not response:
+            raise ValidationError(field.gettext(self.message))
+
+        if not self._validate_recaptcha(response, remote_ip):
+            field.recaptcha_error = "incorrect-captcha-sol"
+            raise ValidationError(field.gettext(self.message))
+
+    def _validate_recaptcha(self, response, remote_addr):
+        """Performs the actual validation."""
+        try:
+            private_key = current_app.config["RECAPTCHA_PRIVATE_KEY"]
+        except KeyError:
+            raise RuntimeError("No RECAPTCHA_PRIVATE_KEY config set") from None
+
+        verify_server = current_app.config.get("RECAPTCHA_VERIFY_SERVER")
+        if not verify_server:
+            verify_server = RECAPTCHA_VERIFY_SERVER_DEFAULT
+
+        data = urlencode(
+            {"secret": private_key, "remoteip": remote_addr, "response": response}
+        )
+
+        http_response = http.urlopen(verify_server, data.encode("utf-8"))
+
+        if http_response.code != 200:
+            return False
+
+        json_resp = json.loads(http_response.read())
+
+        if json_resp["success"]:
+            return True
+
+        for error in json_resp.get("error-codes", []):
+            if error in RECAPTCHA_ERROR_CODES:
+                raise ValidationError(RECAPTCHA_ERROR_CODES[error])
+
+        return False
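+
+# Configuration sketch: this validator reads RECAPTCHA_PRIVATE_KEY from
+# app.config (and the widget reads RECAPTCHA_PUBLIC_KEY), e.g.
+#
+#   app.config["RECAPTCHA_PUBLIC_KEY"] = "..."
+#   app.config["RECAPTCHA_PRIVATE_KEY"] = "..."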
diff --git a/venv/Lib/site-packages/flask_wtf/recaptcha/widgets.py b/venv/Lib/site-packages/flask_wtf/recaptcha/widgets.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfae830bb17f188f2123931847f958590c77e690
--- /dev/null
+++ b/venv/Lib/site-packages/flask_wtf/recaptcha/widgets.py
@@ -0,0 +1,43 @@
+from urllib.parse import urlencode
+
+from flask import current_app
+from markupsafe import Markup
+
+RECAPTCHA_SCRIPT_DEFAULT = "https://www.google.com/recaptcha/api.js"
+RECAPTCHA_DIV_CLASS_DEFAULT = "g-recaptcha"
+RECAPTCHA_TEMPLATE = """
+<script src='%s' async defer></script>
+<div class="%s" %s></div>
+"""
+
+__all__ = ["RecaptchaWidget"]
+
+
+class RecaptchaWidget:
+    def recaptcha_html(self, public_key):
+        html = current_app.config.get("RECAPTCHA_HTML")
+        if html:
+            return Markup(html)
+        params = current_app.config.get("RECAPTCHA_PARAMETERS")
+        script = current_app.config.get("RECAPTCHA_SCRIPT")
+        if not script:
+            script = RECAPTCHA_SCRIPT_DEFAULT
+        if params:
+            script += "?" + urlencode(params)
+        attrs = current_app.config.get("RECAPTCHA_DATA_ATTRS", {})
+        attrs["sitekey"] = public_key
+        snippet = " ".join(f'data-{k}="{attrs[k]}"' for k in attrs)  # noqa: B028, B907
+        div_class = current_app.config.get("RECAPTCHA_DIV_CLASS")
+        if not div_class:
+            div_class = RECAPTCHA_DIV_CLASS_DEFAULT
+        return Markup(RECAPTCHA_TEMPLATE % (script, div_class, snippet))
+
+    def __call__(self, field, error=None, **kwargs):
+        """Returns the recaptcha input HTML."""
+
+        try:
+            public_key = current_app.config["RECAPTCHA_PUBLIC_KEY"]
+        except KeyError:
+            raise RuntimeError("RECAPTCHA_PUBLIC_KEY config not set") from None
+
+        return self.recaptcha_html(public_key)
diff --git a/venv/Lib/site-packages/mako/__init__.py b/venv/Lib/site-packages/mako/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..25d577dada2f0c216fcd65dc88eb817e9162d23e
--- /dev/null
+++ b/venv/Lib/site-packages/mako/__init__.py
@@ -0,0 +1,8 @@
+# mako/__init__.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+
+__version__ = "1.3.0"
diff --git a/venv/Lib/site-packages/mako/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..278aeb75c4f8037a6c7c5098aa6e24f76bd9afef
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/_ast_util.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/_ast_util.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..043cdf6768af7958ebe9c8001cd8f07aefe8ffe3
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/_ast_util.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/ast.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/ast.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db8b086602ec8fb42426db84b55e8d747973ed20
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/ast.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/cache.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/cache.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3225064fb5d510d21ae86abe576760616a3d9970
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/cache.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/cmd.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/cmd.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09841b6f32f9e6495d11bd213764b0e3d96b5d24
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/cmd.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/codegen.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/codegen.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f520786c0ec2c863d77b1c36518577d3acb2fd64
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/codegen.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/compat.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/compat.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23c331aad0a130b1d879278fb827d500715aadaa
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/compat.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/exceptions.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/exceptions.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9ecf45316f1bc68183e4c577c8c94aa5d3f09f7
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/exceptions.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/filters.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/filters.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98e6565c5ae8128a17cfeac3a0bc094f7d81d7bd
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/filters.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/lexer.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/lexer.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf0b93d961fbc52f4234ed92e7bc3fb9ec34e3c5
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/lexer.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/lookup.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/lookup.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36a51f735d7c5392f4542bd3ca463bce0f82b7f2
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/lookup.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/parsetree.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/parsetree.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..085b64368d83c6e6074a135a4390672266759937
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/parsetree.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/pygen.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/pygen.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d2abc1b70e9cba53e369bf1a8389dc29d17841a
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/pygen.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/pyparser.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/pyparser.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d7170ba1eba0afceafd9410bd531d8c8d98c371
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/pyparser.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/runtime.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/runtime.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d22d77265d576e6e0bc9997578b0b1caa8769afa
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/runtime.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/template.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/template.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..addcdf63a898371ea9bce415c3003c6be1a9871e
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/template.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/__pycache__/util.cpython-311.pyc b/venv/Lib/site-packages/mako/__pycache__/util.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..299b0421db1e90db59ad4de59959d6bc7195f1d9
Binary files /dev/null and b/venv/Lib/site-packages/mako/__pycache__/util.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/_ast_util.py b/venv/Lib/site-packages/mako/_ast_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dcdb7f92f33a7614e7d3c42d9843fc974ea8be3
--- /dev/null
+++ b/venv/Lib/site-packages/mako/_ast_util.py
@@ -0,0 +1,713 @@
+# mako/_ast_util.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""
+    ast
+    ~~~
+
+    This is a stripped down version of Armin Ronacher's ast module.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: Python License.
+"""
+
+
+from _ast import Add
+from _ast import And
+from _ast import AST
+from _ast import BitAnd
+from _ast import BitOr
+from _ast import BitXor
+from _ast import Div
+from _ast import Eq
+from _ast import FloorDiv
+from _ast import Gt
+from _ast import GtE
+from _ast import If
+from _ast import In
+from _ast import Invert
+from _ast import Is
+from _ast import IsNot
+from _ast import LShift
+from _ast import Lt
+from _ast import LtE
+from _ast import Mod
+from _ast import Mult
+from _ast import Name
+from _ast import Not
+from _ast import NotEq
+from _ast import NotIn
+from _ast import Or
+from _ast import PyCF_ONLY_AST
+from _ast import RShift
+from _ast import Sub
+from _ast import UAdd
+from _ast import USub
+
+
+BOOLOP_SYMBOLS = {And: "and", Or: "or"}
+
+BINOP_SYMBOLS = {
+    Add: "+",
+    Sub: "-",
+    Mult: "*",
+    Div: "/",
+    FloorDiv: "//",
+    Mod: "%",
+    LShift: "<<",
+    RShift: ">>",
+    BitOr: "|",
+    BitAnd: "&",
+    BitXor: "^",
+}
+
+CMPOP_SYMBOLS = {
+    Eq: "==",
+    Gt: ">",
+    GtE: ">=",
+    In: "in",
+    Is: "is",
+    IsNot: "is not",
+    Lt: "<",
+    LtE: "<=",
+    NotEq: "!=",
+    NotIn: "not in",
+}
+
+UNARYOP_SYMBOLS = {Invert: "~", Not: "not", UAdd: "+", USub: "-"}
+
+ALL_SYMBOLS = {}
+ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
+ALL_SYMBOLS.update(BINOP_SYMBOLS)
+ALL_SYMBOLS.update(CMPOP_SYMBOLS)
+ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
+
+
+def parse(expr, filename="<unknown>", mode="exec"):
+    """Parse an expression into an AST node."""
+    return compile(expr, filename, mode, PyCF_ONLY_AST)
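+
+# Note: with PyCF_ONLY_AST, compile() stops at the AST stage, so e.g.
+# parse("1 + 1", mode="eval") returns an ast.Expression node rather than
+# a code object.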
+
+
+def iter_fields(node):
+    """Iterate over all fields of a node, only yielding existing fields."""
+
+    for field in node._fields:
+        try:
+            yield field, getattr(node, field)
+        except AttributeError:
+            pass
+
+
+class NodeVisitor:
+
+    """
+    Walks the abstract syntax tree and calls visitor functions for every node
+    found.  The visitor functions may return values, which will be forwarded
+    by the `visit` method.
+
+    By default the visitor function for a node is ``'visit_'`` + the class
+    name of the node, so a `TryFinally` node's visit function would
+    be `visit_TryFinally`.  This behavior can be changed by overriding
+    the `get_visitor` function.  If no visitor function exists for a node
+    (`get_visitor` returns `None`), the `generic_visit` visitor is used
+    instead.
+
+    Don't use `NodeVisitor` if you want to apply changes to nodes during
+    traversal.  For that, a special visitor exists (`NodeTransformer`) that
+    allows modifications.
+    """
+
+    def get_visitor(self, node):
+        """
+        Return the visitor function for this node or `None` if no visitor
+        exists for this node.  In that case the generic visit function is
+        used instead.
+        """
+        method = "visit_" + node.__class__.__name__
+        return getattr(self, method, None)
+
+    def visit(self, node):
+        """Visit a node."""
+        f = self.get_visitor(node)
+        if f is not None:
+            return f(node)
+        return self.generic_visit(node)
+
+    def generic_visit(self, node):
+        """Called if no explicit visitor function exists for a node."""
+        for field, value in iter_fields(node):
+            if isinstance(value, list):
+                for item in value:
+                    if isinstance(item, AST):
+                        self.visit(item)
+            elif isinstance(value, AST):
+                self.visit(value)
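+
+# Example (a sketch, not part of the original module): a minimal
+# NodeVisitor subclass that counts Name nodes in a parsed expression.
+#
+#     class NameCounter(NodeVisitor):
+#         def __init__(self):
+#             self.count = 0
+#
+#         def visit_Name(self, node):
+#             self.count += 1
+#
+#     counter = NameCounter()
+#     counter.visit(parse("a + b"))
+#     assert counter.count == 2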
+
+
+class NodeTransformer(NodeVisitor):
+
+    """
+    Walks the abstract syntax tree and allows modifications of nodes.
+
+    The `NodeTransformer` will walk the AST and use the return value of the
+    visitor functions to replace or remove the old node.  If the return
+    value of the visitor function is `None`, the node is removed
+    from its previous location; otherwise it is replaced with the return
+    value.  The return value may be the original node, in which case no
+    replacement takes place.
+
+    Here is an example transformer that rewrites all `foo` to `data['foo']`::
+
+        class RewriteName(NodeTransformer):
+
+            def visit_Name(self, node):
+                return copy_location(Subscript(
+                    value=Name(id='data', ctx=Load()),
+                    slice=Index(value=Str(s=node.id)),
+                    ctx=node.ctx
+                ), node)
+
+    Keep in mind that if the node you're operating on has child nodes
+    you must either transform the child nodes yourself or call the generic
+    visit function for the node first.
+
+    Nodes that were part of a collection of statements (that applies to
+    all statement nodes) may also return a list of nodes rather than just
+    a single node.
+
+    Usually you use the transformer like this::
+
+        node = YourTransformer().visit(node)
+    """
+
+    def generic_visit(self, node):
+        for field, old_value in iter_fields(node):
+            if isinstance(old_value, list):
+                new_values = []
+                for value in old_value:
+                    if isinstance(value, AST):
+                        value = self.visit(value)
+                        if value is None:
+                            continue
+                        elif not isinstance(value, AST):
+                            new_values.extend(value)
+                            continue
+                    new_values.append(value)
+                old_value[:] = new_values
+            elif isinstance(old_value, AST):
+                new_node = self.visit(old_value)
+                if new_node is None:
+                    delattr(node, field)
+                else:
+                    setattr(node, field, new_node)
+        return node
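+
+# Note: the docstring example above uses the pre-3.8 `Str` and `Index`
+# nodes, which are deprecated; on Python 3.9+ the equivalent (using the
+# stdlib `ast` module's copy_location, Subscript, Load and Constant,
+# which are not imported here) would be:
+#
+#     class RewriteName(NodeTransformer):
+#         def visit_Name(self, node):
+#             return copy_location(Subscript(
+#                 value=Name(id='data', ctx=Load()),
+#                 slice=Constant(value=node.id),
+#                 ctx=node.ctx
+#             ), node)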
+
+
+class SourceGenerator(NodeVisitor):
+
+    """
+    This visitor is able to transform a well-formed syntax tree into Python
+    source code.  (The `node_to_source` helper referred to by the original
+    module is not included in this stripped-down version.)
+    """
+
+    def __init__(self, indent_with):
+        self.result = []
+        self.indent_with = indent_with
+        self.indentation = 0
+        self.new_lines = 0
+
+    def write(self, x):
+        if self.new_lines:
+            if self.result:
+                self.result.append("\n" * self.new_lines)
+            self.result.append(self.indent_with * self.indentation)
+            self.new_lines = 0
+        self.result.append(x)
+
+    def newline(self, n=1):
+        self.new_lines = max(self.new_lines, n)
+
+    def body(self, statements):
+        self.newline()  # ensure the body starts on a new line
+        self.indentation += 1
+        for stmt in statements:
+            self.visit(stmt)
+        self.indentation -= 1
+
+    def body_or_else(self, node):
+        self.body(node.body)
+        if node.orelse:
+            self.newline()
+            self.write("else:")
+            self.body(node.orelse)
+
+    def signature(self, node):
+        want_comma = []
+
+        def write_comma():
+            if want_comma:
+                self.write(", ")
+            else:
+                want_comma.append(True)
+
+        padding = [None] * (len(node.args) - len(node.defaults))
+        for arg, default in zip(node.args, padding + node.defaults):
+            write_comma()
+            self.visit(arg)
+            if default is not None:
+                self.write("=")
+                self.visit(default)
+        if node.vararg is not None:
+            write_comma()
+            self.write("*" + node.vararg.arg)
+        if node.kwarg is not None:
+            write_comma()
+            self.write("**" + node.kwarg.arg)
+
+    def decorators(self, node):
+        for decorator in node.decorator_list:
+            self.newline()
+            self.write("@")
+            self.visit(decorator)
+
+    # Statements
+
+    def visit_Assign(self, node):
+        self.newline()
+        for idx, target in enumerate(node.targets):
+            if idx:
+                self.write(", ")
+            self.visit(target)
+        self.write(" = ")
+        self.visit(node.value)
+
+    def visit_AugAssign(self, node):
+        self.newline()
+        self.visit(node.target)
+        self.write(BINOP_SYMBOLS[type(node.op)] + "=")
+        self.visit(node.value)
+
+    def visit_ImportFrom(self, node):
+        self.newline()
+        self.write("from %s%s import " % ("." * node.level, node.module))
+        for idx, item in enumerate(node.names):
+            if idx:
+                self.write(", ")
+            self.visit(item)  # item is an alias node; renders via visit_alias
+
+    def visit_Import(self, node):
+        self.newline()
+        for item in node.names:
+            self.write("import ")
+            self.visit(item)
+
+    def visit_Expr(self, node):
+        self.newline()
+        self.generic_visit(node)
+
+    def visit_FunctionDef(self, node):
+        self.newline(n=2)
+        self.decorators(node)
+        self.newline()
+        self.write("def %s(" % node.name)
+        self.signature(node.args)
+        self.write("):")
+        self.body(node.body)
+
+    def visit_ClassDef(self, node):
+        have_args = []
+
+        def paren_or_comma():
+            if have_args:
+                self.write(", ")
+            else:
+                have_args.append(True)
+                self.write("(")
+
+        self.newline(n=3)
+        self.decorators(node)
+        self.newline()
+        self.write("class %s" % node.name)
+        for base in node.bases:
+            paren_or_comma()
+            self.visit(base)
+        # XXX: the if here is used to keep this module compatible
+        #      with python 2.6.
+        if hasattr(node, "keywords"):
+            for keyword in node.keywords:
+                paren_or_comma()
+                self.write(keyword.arg + "=")
+                self.visit(keyword.value)
+            if getattr(node, "starargs", None):
+                paren_or_comma()
+                self.write("*")
+                self.visit(node.starargs)
+            if getattr(node, "kwargs", None):
+                paren_or_comma()
+                self.write("**")
+                self.visit(node.kwargs)
+        self.write(have_args and "):" or ":")
+        self.body(node.body)
+
+    def visit_If(self, node):
+        self.newline()
+        self.write("if ")
+        self.visit(node.test)
+        self.write(":")
+        self.body(node.body)
+        while True:
+            else_ = node.orelse
+            if len(else_) == 1 and isinstance(else_[0], If):
+                node = else_[0]
+                self.newline()
+                self.write("elif ")
+                self.visit(node.test)
+                self.write(":")
+                self.body(node.body)
+            else:
+                self.newline()
+                self.write("else:")
+                self.body(else_)
+                break
+
+    def visit_For(self, node):
+        self.newline()
+        self.write("for ")
+        self.visit(node.target)
+        self.write(" in ")
+        self.visit(node.iter)
+        self.write(":")
+        self.body_or_else(node)
+
+    def visit_While(self, node):
+        self.newline()
+        self.write("while ")
+        self.visit(node.test)
+        self.write(":")
+        self.body_or_else(node)
+
+    def visit_With(self, node):
+        self.newline()
+        self.write("with ")
+        self.visit(node.context_expr)
+        if node.optional_vars is not None:
+            self.write(" as ")
+            self.visit(node.optional_vars)
+        self.write(":")
+        self.body(node.body)
+
+    def visit_Pass(self, node):
+        self.newline()
+        self.write("pass")
+
+    def visit_Print(self, node):
+        # XXX: python 2.6 only
+        self.newline()
+        self.write("print ")
+        want_comma = False
+        if node.dest is not None:
+            self.write(" >> ")
+            self.visit(node.dest)
+            want_comma = True
+        for value in node.values:
+            if want_comma:
+                self.write(", ")
+            self.visit(value)
+            want_comma = True
+        if not node.nl:
+            self.write(",")
+
+    def visit_Delete(self, node):
+        self.newline()
+        self.write("del ")
+        for idx, target in enumerate(node.targets):
+            if idx:
+                self.write(", ")
+            self.visit(target)
+
+    def visit_TryExcept(self, node):
+        self.newline()
+        self.write("try:")
+        self.body(node.body)
+        for handler in node.handlers:
+            self.visit(handler)
+
+    def visit_TryFinally(self, node):
+        self.newline()
+        self.write("try:")
+        self.body(node.body)
+        self.newline()
+        self.write("finally:")
+        self.body(node.finalbody)
+
+    def visit_Global(self, node):
+        self.newline()
+        self.write("global " + ", ".join(node.names))
+
+    def visit_Nonlocal(self, node):
+        self.newline()
+        self.write("nonlocal " + ", ".join(node.names))
+
+    def visit_Return(self, node):
+        self.newline()
+        self.write("return ")
+        self.visit(node.value)
+
+    def visit_Break(self, node):
+        self.newline()
+        self.write("break")
+
+    def visit_Continue(self, node):
+        self.newline()
+        self.write("continue")
+
+    def visit_Raise(self, node):
+        # XXX: Python 2.6 / 3.0 compatibility
+        self.newline()
+        self.write("raise")
+        if hasattr(node, "exc") and node.exc is not None:
+            self.write(" ")
+            self.visit(node.exc)
+            if node.cause is not None:
+                self.write(" from ")
+                self.visit(node.cause)
+        elif hasattr(node, "type") and node.type is not None:
+            self.visit(node.type)
+            if node.inst is not None:
+                self.write(", ")
+                self.visit(node.inst)
+            if node.tback is not None:
+                self.write(", ")
+                self.visit(node.tback)
+
+    # Expressions
+
+    def visit_Attribute(self, node):
+        self.visit(node.value)
+        self.write("." + node.attr)
+
+    def visit_Call(self, node):
+        want_comma = []
+
+        def write_comma():
+            if want_comma:
+                self.write(", ")
+            else:
+                want_comma.append(True)
+
+        self.visit(node.func)
+        self.write("(")
+        for arg in node.args:
+            write_comma()
+            self.visit(arg)
+        for keyword in node.keywords:
+            write_comma()
+            self.write(keyword.arg + "=")
+            self.visit(keyword.value)
+        if getattr(node, "starargs", None):
+            write_comma()
+            self.write("*")
+            self.visit(node.starargs)
+        if getattr(node, "kwargs", None):
+            write_comma()
+            self.write("**")
+            self.visit(node.kwargs)
+        self.write(")")
+
+    def visit_Name(self, node):
+        self.write(node.id)
+
+    def visit_NameConstant(self, node):
+        self.write(str(node.value))
+
+    def visit_arg(self, node):
+        self.write(node.arg)
+
+    def visit_Str(self, node):
+        self.write(repr(node.s))
+
+    def visit_Bytes(self, node):
+        self.write(repr(node.s))
+
+    def visit_Num(self, node):
+        self.write(repr(node.n))
+
+    # newly needed in Python 3.8
+    def visit_Constant(self, node):
+        self.write(repr(node.value))
+
+    def visit_Tuple(self, node):
+        self.write("(")
+        idx = -1
+        for idx, item in enumerate(node.elts):
+            if idx:
+                self.write(", ")
+            self.visit(item)
+        self.write(idx and ")" or ",)")
+
+    def sequence_visit(left, right):
+        def visit(self, node):
+            self.write(left)
+            for idx, item in enumerate(node.elts):
+                if idx:
+                    self.write(", ")
+                self.visit(item)
+            self.write(right)
+
+        return visit
+
+    visit_List = sequence_visit("[", "]")
+    visit_Set = sequence_visit("{", "}")
+    del sequence_visit
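+    # sequence_visit is a class-body factory: it builds the two visit
+    # methods above and is then removed from the class namespace;
+    # generator_visit below uses the same trick.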
+
+    def visit_Dict(self, node):
+        self.write("{")
+        for idx, (key, value) in enumerate(zip(node.keys, node.values)):
+            if idx:
+                self.write(", ")
+            self.visit(key)
+            self.write(": ")
+            self.visit(value)
+        self.write("}")
+
+    def visit_BinOp(self, node):
+        self.write("(")
+        self.visit(node.left)
+        self.write(" %s " % BINOP_SYMBOLS[type(node.op)])
+        self.visit(node.right)
+        self.write(")")
+
+    def visit_BoolOp(self, node):
+        self.write("(")
+        for idx, value in enumerate(node.values):
+            if idx:
+                self.write(" %s " % BOOLOP_SYMBOLS[type(node.op)])
+            self.visit(value)
+        self.write(")")
+
+    def visit_Compare(self, node):
+        self.write("(")
+        self.visit(node.left)
+        for op, right in zip(node.ops, node.comparators):
+            self.write(" %s " % CMPOP_SYMBOLS[type(op)])
+            self.visit(right)
+        self.write(")")
+
+    def visit_UnaryOp(self, node):
+        self.write("(")
+        op = UNARYOP_SYMBOLS[type(node.op)]
+        self.write(op)
+        if op == "not":
+            self.write(" ")
+        self.visit(node.operand)
+        self.write(")")
+
+    def visit_Subscript(self, node):
+        self.visit(node.value)
+        self.write("[")
+        self.visit(node.slice)
+        self.write("]")
+
+    def visit_Slice(self, node):
+        if node.lower is not None:
+            self.visit(node.lower)
+        self.write(":")
+        if node.upper is not None:
+            self.visit(node.upper)
+        if node.step is not None:
+            self.write(":")
+            if not (isinstance(node.step, Name) and node.step.id == "None"):
+                self.visit(node.step)
+
+    def visit_ExtSlice(self, node):
+        for idx, item in enumerate(node.dims):
+            if idx:
+                self.write(", ")
+            self.visit(item)
+
+    def visit_Yield(self, node):
+        self.write("yield ")
+        self.visit(node.value)
+
+    def visit_Lambda(self, node):
+        self.write("lambda ")
+        self.signature(node.args)
+        self.write(": ")
+        self.visit(node.body)
+
+    def visit_Ellipsis(self, node):
+        self.write("Ellipsis")
+
+    def generator_visit(left, right):
+        def visit(self, node):
+            self.write(left)
+            self.visit(node.elt)
+            for comprehension in node.generators:
+                self.visit(comprehension)
+            self.write(right)
+
+        return visit
+
+    visit_ListComp = generator_visit("[", "]")
+    visit_GeneratorExp = generator_visit("(", ")")
+    visit_SetComp = generator_visit("{", "}")
+    del generator_visit
+
+    def visit_DictComp(self, node):
+        self.write("{")
+        self.visit(node.key)
+        self.write(": ")
+        self.visit(node.value)
+        for comprehension in node.generators:
+            self.visit(comprehension)
+        self.write("}")
+
+    def visit_IfExp(self, node):
+        self.visit(node.body)
+        self.write(" if ")
+        self.visit(node.test)
+        self.write(" else ")
+        self.visit(node.orelse)
+
+    def visit_Starred(self, node):
+        self.write("*")
+        self.visit(node.value)
+
+    def visit_Repr(self, node):
+        # XXX: python 2.6 only
+        self.write("`")
+        self.visit(node.value)
+        self.write("`")
+
+    # Helper Nodes
+
+    def visit_alias(self, node):
+        self.write(node.name)
+        if node.asname is not None:
+            self.write(" as " + node.asname)
+
+    def visit_comprehension(self, node):
+        self.write(" for ")
+        self.visit(node.target)
+        self.write(" in ")
+        self.visit(node.iter)
+        if node.ifs:
+            for if_ in node.ifs:
+                self.write(" if ")
+                self.visit(if_)
+
+    def visit_excepthandler(self, node):
+        self.newline()
+        self.write("except")
+        if node.type is not None:
+            self.write(" ")
+            self.visit(node.type)
+            if node.name is not None:
+                self.write(" as ")
+                self.visit(node.name)
+        self.write(":")
+        self.body(node.body)
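+
+# Example (a sketch, not part of the original module): round-tripping a
+# small statement through SourceGenerator.
+#
+#     gen = SourceGenerator(indent_with="    ")
+#     gen.visit(parse("x = 1 + 2"))
+#     print("".join(gen.result))   # -> x = (1 + 2)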
diff --git a/venv/Lib/site-packages/mako/ast.py b/venv/Lib/site-packages/mako/ast.py
new file mode 100644
index 0000000000000000000000000000000000000000..3076e2ee10fd659b0bb55fe0e05b94931abe1d6b
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ast.py
@@ -0,0 +1,202 @@
+# mako/ast.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""utilities for analyzing expressions and blocks of Python
+code, as well as generating Python from AST nodes"""
+
+import re
+
+from mako import exceptions
+from mako import pyparser
+
+
+class PythonCode:
+
+    """represents information about a string containing Python code"""
+
+    def __init__(self, code, **exception_kwargs):
+        self.code = code
+
+        # represents all identifiers which are assigned to at some point in
+        # the code
+        self.declared_identifiers = set()
+
+        # represents all identifiers which are referenced before their
+        # assignment, if any
+        self.undeclared_identifiers = set()
+
+        # note that an identifier can be in both the undeclared and declared
+        # sets.
+
+        # using AST to parse instead of using code.co_varnames,
+        # code.co_names has several advantages:
+        # - we can locate an identifier as "undeclared" even if
+        # it's declared later in the same block of code
+        # - AST is less likely to break with version changes
+        # (for example, the behavior of co_names changed a little bit
+        # in python version 2.5)
+        if isinstance(code, str):
+            expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
+        else:
+            expr = code
+
+        f = pyparser.FindIdentifiers(self, **exception_kwargs)
+        f.visit(expr)
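+
+# Example (a sketch, assuming the mako package is importable): analysing a
+# one-line assignment.
+#
+#     pc = PythonCode("x = y + 1")
+#     pc.declared_identifiers     # -> {'x'}
+#     pc.undeclared_identifiers   # -> {'y'}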
+
+
+class ArgumentList:
+
+    """parses a fragment of code as a comma-separated list of expressions"""
+
+    def __init__(self, code, **exception_kwargs):
+        self.codeargs = []
+        self.args = []
+        self.declared_identifiers = set()
+        self.undeclared_identifiers = set()
+        if isinstance(code, str):
+            if re.match(r"\S", code) and not re.match(r",\s*$", code):
+                # if there's text and no trailing comma, ensure it's parsed
+                # as a tuple by adding a trailing comma
+                code += ","
+            expr = pyparser.parse(code, "exec", **exception_kwargs)
+        else:
+            expr = code
+
+        f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
+        f.visit(expr)
+
+
+class PythonFragment(PythonCode):
+
+    """extends PythonCode to provide identifier lookups in partial control
+    statements
+
+    e.g.::
+
+        for x in 5:
+        elif y==9:
+        except (MyException, e):
+
+    """
+
+    def __init__(self, code, **exception_kwargs):
+        m = re.match(r"^(\w+)(?:\s+(.*?))?:\s*(#|$)", code.strip(), re.S)
+        if not m:
+            raise exceptions.CompileException(
+                "Fragment '%s' is not a partial control statement" % code,
+                **exception_kwargs,
+            )
+        if m.group(3):
+            code = code[: m.start(3)]
+        (keyword, expr) = m.group(1, 2)
+        if keyword in ["for", "if", "while"]:
+            code = code + "pass"
+        elif keyword == "try":
+            code = code + "pass\nexcept:pass"
+        elif keyword in ["elif", "else"]:
+            code = "if False:pass\n" + code + "pass"
+        elif keyword == "except":
+            code = "try:pass\n" + code + "pass"
+        elif keyword == "with":
+            code = code + "pass"
+        else:
+            raise exceptions.CompileException(
+                "Unsupported control keyword: '%s'" % keyword,
+                **exception_kwargs,
+            )
+        super().__init__(code, **exception_kwargs)
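+
+# Example (a sketch): a partial `for` statement is padded out with `pass`
+# so it parses, then analysed like any other PythonCode.
+#
+#     frag = PythonFragment("for x in data:")
+#     frag.declared_identifiers    # -> {'x'}
+#     frag.undeclared_identifiers  # -> {'data'}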
+
+
+class FunctionDecl:
+
+    """function declaration"""
+
+    def __init__(self, code, allow_kwargs=True, **exception_kwargs):
+        self.code = code
+        expr = pyparser.parse(code, "exec", **exception_kwargs)
+
+        f = pyparser.ParseFunc(self, **exception_kwargs)
+        f.visit(expr)
+        if not hasattr(self, "funcname"):
+            raise exceptions.CompileException(
+                "Code '%s' is not a function declaration" % code,
+                **exception_kwargs,
+            )
+        if not allow_kwargs and self.kwargs:
+            raise exceptions.CompileException(
+                "'**%s' keyword argument not allowed here"
+                % self.kwargnames[-1],
+                **exception_kwargs,
+            )
+
+    def get_argument_expressions(self, as_call=False):
+        """Return the argument declarations of this FunctionDecl as a printable
+        list.
+
+        By default the return value is appropriate for writing in a ``def``;
+        set `as_call` to true to build arguments to be passed to the function
+        instead (assuming locals with the same names as the arguments exist).
+        """
+
+        namedecls = []
+
+        # Build in reverse order, since defaults and slurpy args come last
+        argnames = self.argnames[::-1]
+        kwargnames = self.kwargnames[::-1]
+        defaults = self.defaults[::-1]
+        kwdefaults = self.kwdefaults[::-1]
+
+        # Named arguments
+        if self.kwargs:
+            namedecls.append("**" + kwargnames.pop(0))
+
+        for name in kwargnames:
+            # Keyword-only arguments must always be used by name, so even if
+            # this is a call, print out `foo=foo`
+            if as_call:
+                namedecls.append("%s=%s" % (name, name))
+            elif kwdefaults:
+                default = kwdefaults.pop(0)
+                if default is None:
+                    # The AST always gives kwargs a default, since you can do
+                    # `def foo(*, a=1, b, c=3)`
+                    namedecls.append(name)
+                else:
+                    namedecls.append(
+                        "%s=%s"
+                        % (name, pyparser.ExpressionGenerator(default).value())
+                    )
+            else:
+                namedecls.append(name)
+
+        # Positional arguments
+        if self.varargs:
+            namedecls.append("*" + argnames.pop(0))
+
+        for name in argnames:
+            if as_call or not defaults:
+                namedecls.append(name)
+            else:
+                default = defaults.pop(0)
+                namedecls.append(
+                    "%s=%s"
+                    % (name, pyparser.ExpressionGenerator(default).value())
+                )
+
+        namedecls.reverse()
+        return namedecls
+
+    @property
+    def allargnames(self):
+        return tuple(self.argnames) + tuple(self.kwargnames)
+
+
+class FunctionArgs(FunctionDecl):
+
+    """the argument portion of a function declaration"""
+
+    def __init__(self, code, **kwargs):
+        super().__init__("def ANON(%s):pass" % code, **kwargs)
diff --git a/venv/Lib/site-packages/mako/cache.py b/venv/Lib/site-packages/mako/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4e32d0f479f075d3e0f18b8968ee838fb41b2db
--- /dev/null
+++ b/venv/Lib/site-packages/mako/cache.py
@@ -0,0 +1,239 @@
+# mako/cache.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from mako import util
+
+_cache_plugins = util.PluginLoader("mako.cache")
+
+register_plugin = _cache_plugins.register
+register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl")
+
+
+class Cache:
+
+    """Represents a data content cache made available to the module
+    space of a specific :class:`.Template` object.
+
+    .. versionadded:: 0.6
+       :class:`.Cache` by itself is mostly a
+       container for a :class:`.CacheImpl` object, which implements
+       a fixed API to provide caching services; specific subclasses exist to
+       implement different
+       caching strategies.   Mako includes a backend that works with
+       the Beaker caching system.   Beaker itself then supports
+       a number of backends (e.g. file, memory, memcached).
+
+    The construction of a :class:`.Cache` is part of the mechanics
+    of a :class:`.Template`, and programmatic access to this
+    cache is typically via the :attr:`.Template.cache` attribute.
+
+    """
+
+    impl = None
+    """Provide the :class:`.CacheImpl` in use by this :class:`.Cache`.
+
+    This accessor allows a :class:`.CacheImpl` with additional
+    methods beyond that of :class:`.Cache` to be used programmatically.
+
+    """
+
+    id = None
+    """Return the 'id' that identifies this cache.
+
+    This is a value that should be globally unique to the
+    :class:`.Template` associated with this cache, and can
+    be used by a caching system to name a local container
+    for data specific to this template.
+
+    """
+
+    starttime = None
+    """Epochal time value for when the owning :class:`.Template` was
+    first compiled.
+
+    A cache implementation may wish to invalidate data earlier than
+    this timestamp; this has the effect of the cache for a specific
+    :class:`.Template` starting clean any time the :class:`.Template`
+    is recompiled, such as when the original template file changed on
+    the filesystem.
+
+    """
+
+    def __init__(self, template, *args):
+        # check for a stale template calling the
+        # constructor
+        if isinstance(template, str) and args:
+            return
+        self.template = template
+        self.id = template.module.__name__
+        self.starttime = template.module._modified_time
+        self._def_regions = {}
+        self.impl = self._load_impl(self.template.cache_impl)
+
+    def _load_impl(self, name):
+        return _cache_plugins.load(name)(self)
+
+    def get_or_create(self, key, creation_function, **kw):
+        """Retrieve a value from the cache, using the given creation function
+        to generate a new value."""
+
+        return self._ctx_get_or_create(key, creation_function, None, **kw)
+
+    def _ctx_get_or_create(self, key, creation_function, context, **kw):
+        """Retrieve a value from the cache, using the given creation function
+        to generate a new value."""
+
+        if not self.template.cache_enabled:
+            return creation_function()
+
+        return self.impl.get_or_create(
+            key, creation_function, **self._get_cache_kw(kw, context)
+        )
+
+    def set(self, key, value, **kw):
+        r"""Place a value in the cache.
+
+        :param key: the value's key.
+        :param value: the value.
+        :param \**kw: cache configuration arguments.
+
+        """
+
+        self.impl.set(key, value, **self._get_cache_kw(kw, None))
+
+    put = set
+    """A synonym for :meth:`.Cache.set`.
+
+    This is here for backwards compatibility.
+
+    """
+
+    def get(self, key, **kw):
+        r"""Retrieve a value from the cache.
+
+        :param key: the value's key.
+        :param \**kw: cache configuration arguments.  The
+         backend is configured using these arguments upon first request.
+         Subsequent requests that use the same series of configuration
+         values will use that same backend.
+
+        """
+        return self.impl.get(key, **self._get_cache_kw(kw, None))
+
+    def invalidate(self, key, **kw):
+        r"""Invalidate a value in the cache.
+
+        :param key: the value's key.
+        :param \**kw: cache configuration arguments.  The
+         backend is configured using these arguments upon first request.
+         Subsequent requests that use the same series of configuration
+         values will use that same backend.
+
+        """
+        self.impl.invalidate(key, **self._get_cache_kw(kw, None))
+
+    def invalidate_body(self):
+        """Invalidate the cached content of the "body" method for this
+        template.
+
+        """
+        self.invalidate("render_body", __M_defname="render_body")
+
+    def invalidate_def(self, name):
+        """Invalidate the cached content of a particular ``<%def>`` within this
+        template.
+
+        """
+
+        self.invalidate("render_%s" % name, __M_defname="render_%s" % name)
+
+    def invalidate_closure(self, name):
+        """Invalidate a nested ``<%def>`` within this template.
+
+        Caching of nested defs is a blunt tool as there is no
+        management of scope -- nested defs that use cache tags
+        need to have names unique among all other nested defs in the
+        template, otherwise they will overwrite each other's
+        content.
+
+        """
+
+        self.invalidate(name, __M_defname=name)
+
+    def _get_cache_kw(self, kw, context):
+        defname = kw.pop("__M_defname", None)
+        if not defname:
+            tmpl_kw = self.template.cache_args.copy()
+            tmpl_kw.update(kw)
+        elif defname in self._def_regions:
+            tmpl_kw = self._def_regions[defname]
+        else:
+            tmpl_kw = self.template.cache_args.copy()
+            tmpl_kw.update(kw)
+            self._def_regions[defname] = tmpl_kw
+        if context and self.impl.pass_context:
+            tmpl_kw = tmpl_kw.copy()
+            tmpl_kw.setdefault("context", context)
+        return tmpl_kw
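+
+# Example (a sketch): programmatic use via a compiled Template `t`,
+# assuming a working cache backend (e.g. Beaker) is installed and
+# caching is enabled on the template.
+#
+#     t.cache.set("greeting", "hello")
+#     t.cache.get("greeting")        # -> "hello"
+#     t.cache.invalidate("greeting")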
+
+
+class CacheImpl:
+
+    """Provide a cache implementation for use by :class:`.Cache`."""
+
+    def __init__(self, cache):
+        self.cache = cache
+
+    pass_context = False
+    """If ``True``, the :class:`.Context` will be passed to
+    :meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``.
+    """
+
+    def get_or_create(self, key, creation_function, **kw):
+        r"""Retrieve a value from the cache, using the given creation function
+        to generate a new value.
+
+        This function *must* return a value, either from
+        the cache, or via the given creation function.
+        If the creation function is called, the newly
+        created value should be populated into the cache
+        under the given key before being returned.
+
+        :param key: the value's key.
+        :param creation_function: function that when called generates
+         a new value.
+        :param \**kw: cache configuration arguments.
+
+        """
+        raise NotImplementedError()
+
+    def set(self, key, value, **kw):
+        r"""Place a value in the cache.
+
+        :param key: the value's key.
+        :param value: the value.
+        :param \**kw: cache configuration arguments.
+
+        """
+        raise NotImplementedError()
+
+    def get(self, key, **kw):
+        r"""Retrieve a value from the cache.
+
+        :param key: the value's key.
+        :param \**kw: cache configuration arguments.
+
+        """
+        raise NotImplementedError()
+
+    def invalidate(self, key, **kw):
+        r"""Invalidate a value in the cache.
+
+        :param key: the value's key.
+        :param \**kw: cache configuration arguments.
+
+        """
+        raise NotImplementedError()
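+
+# Example (a sketch, not part of Mako; the plugin name and module path
+# below are hypothetical): a minimal in-memory CacheImpl backed by a dict.
+#
+#     class DictCacheImpl(CacheImpl):
+#         def __init__(self, cache):
+#             super().__init__(cache)
+#             self._data = {}
+#
+#         def get_or_create(self, key, creation_function, **kw):
+#             if key not in self._data:
+#                 self._data[key] = creation_function()
+#             return self._data[key]
+#
+#         def set(self, key, value, **kw):
+#             self._data[key] = value
+#
+#         def get(self, key, **kw):
+#             return self._data.get(key)
+#
+#         def invalidate(self, key, **kw):
+#             self._data.pop(key, None)
+#
+#     register_plugin("dict", "my_app.caching", "DictCacheImpl")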
diff --git a/venv/Lib/site-packages/mako/cmd.py b/venv/Lib/site-packages/mako/cmd.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bb8197413f258f72aa4ca25ecbb9fa94a0eb092
--- /dev/null
+++ b/venv/Lib/site-packages/mako/cmd.py
@@ -0,0 +1,99 @@
+# mako/cmd.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+from argparse import ArgumentParser
+from os.path import dirname
+from os.path import isfile
+import sys
+
+from mako import exceptions
+from mako.lookup import TemplateLookup
+from mako.template import Template
+
+
+def varsplit(var):
+    if "=" not in var:
+        return (var, "")
+    return var.split("=", 1)
+
+
+def _exit():
+    sys.stderr.write(exceptions.text_error_template().render())
+    sys.exit(1)
+
+
+def cmdline(argv=None):
+    parser = ArgumentParser()
+    parser.add_argument(
+        "--var",
+        default=[],
+        action="append",
+        help="variable (can be used multiple times, use name=value)",
+    )
+    parser.add_argument(
+        "--template-dir",
+        default=[],
+        action="append",
+        help="Directory to use for template lookup (multiple "
+        "directories may be provided). If not given then if the "
+        "template is read from stdin, the value defaults to be "
+        "the current directory, otherwise it defaults to be the "
+        "parent directory of the file provided.",
+    )
+    parser.add_argument(
+        "--output-encoding", default=None, help="force output encoding"
+    )
+    parser.add_argument(
+        "--output-file",
+        default=None,
+        help="Write to file upon successful render instead of stdout",
+    )
+    parser.add_argument("input", nargs="?", default="-")
+
+    options = parser.parse_args(argv)
+
+    output_encoding = options.output_encoding
+    output_file = options.output_file
+
+    if options.input == "-":
+        lookup_dirs = options.template_dir or ["."]
+        lookup = TemplateLookup(lookup_dirs)
+        try:
+            template = Template(
+                sys.stdin.read(),
+                lookup=lookup,
+                output_encoding=output_encoding,
+            )
+        except:
+            _exit()
+    else:
+        filename = options.input
+        if not isfile(filename):
+            raise SystemExit("error: can't find %s" % filename)
+        lookup_dirs = options.template_dir or [dirname(filename)]
+        lookup = TemplateLookup(lookup_dirs)
+        try:
+            template = Template(
+                filename=filename,
+                lookup=lookup,
+                output_encoding=output_encoding,
+            )
+        except:
+            _exit()
+
+    kw = dict(varsplit(var) for var in options.var)
+    try:
+        rendered = template.render(**kw)
+    except:
+        _exit()
+    else:
+        if output_file:
+            open(output_file, "wt", encoding=output_encoding).write(rendered)
+        else:
+            sys.stdout.write(rendered)
+
+
+if __name__ == "__main__":
+    cmdline()
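+
+# Example usage (assuming a template file hello.txt containing
+# "hello ${name}"):
+#
+#     python -m mako.cmd --var name=world hello.txt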
diff --git a/venv/Lib/site-packages/mako/codegen.py b/venv/Lib/site-packages/mako/codegen.py
new file mode 100644
index 0000000000000000000000000000000000000000..a516d3bcc7ab9a28f5da8b954441541adaec48a7
--- /dev/null
+++ b/venv/Lib/site-packages/mako/codegen.py
@@ -0,0 +1,1307 @@
+# mako/codegen.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""provides functionality for rendering a parsetree constructing into module
+source code."""
+
+import json
+import re
+import time
+
+from mako import ast
+from mako import exceptions
+from mako import filters
+from mako import parsetree
+from mako import util
+from mako.pygen import PythonPrinter
+
+
+MAGIC_NUMBER = 10
+
+# names which are hardwired into the
+# template and are not accessed via the
+# context itself
+TOPLEVEL_DECLARED = {"UNDEFINED", "STOP_RENDERING"}
+RESERVED_NAMES = {"context", "loop"}.union(TOPLEVEL_DECLARED)
+
+
+def compile(  # noqa
+    node,
+    uri,
+    filename=None,
+    default_filters=None,
+    buffer_filters=None,
+    imports=None,
+    future_imports=None,
+    source_encoding=None,
+    generate_magic_comment=True,
+    strict_undefined=False,
+    enable_loop=True,
+    reserved_names=frozenset(),
+):
+    """Generate module source code given a parsetree node,
+    uri, and optional source filename"""
+
+    buf = util.FastEncodingBuffer()
+
+    printer = PythonPrinter(buf)
+    _GenerateRenderMethod(
+        printer,
+        _CompileContext(
+            uri,
+            filename,
+            default_filters,
+            buffer_filters,
+            imports,
+            future_imports,
+            source_encoding,
+            generate_magic_comment,
+            strict_undefined,
+            enable_loop,
+            reserved_names,
+        ),
+        node,
+    )
+    return buf.getvalue()
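+
+# Example (a sketch): roughly how Template drives this entry point --
+# parse the template text with the lexer, then compile the node tree.
+#
+#     from mako import lexer
+#     node = lexer.Lexer("hello ${name}").parse()
+#     module_source = compile(node, uri="memory:0")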
+
+
+class _CompileContext:
+    def __init__(
+        self,
+        uri,
+        filename,
+        default_filters,
+        buffer_filters,
+        imports,
+        future_imports,
+        source_encoding,
+        generate_magic_comment,
+        strict_undefined,
+        enable_loop,
+        reserved_names,
+    ):
+        self.uri = uri
+        self.filename = filename
+        self.default_filters = default_filters
+        self.buffer_filters = buffer_filters
+        self.imports = imports
+        self.future_imports = future_imports
+        self.source_encoding = source_encoding
+        self.generate_magic_comment = generate_magic_comment
+        self.strict_undefined = strict_undefined
+        self.enable_loop = enable_loop
+        self.reserved_names = reserved_names
+
+
+class _GenerateRenderMethod:
+
+    """A template visitor object which generates the
+    full module source for a template.
+
+    """
+
+    def __init__(self, printer, compiler, node):
+        self.printer = printer
+        self.compiler = compiler
+        self.node = node
+        self.identifier_stack = [None]
+        self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))
+
+        if self.in_def:
+            name = "render_%s" % node.funcname
+            args = node.get_argument_expressions()
+            filtered = len(node.filter_args.args) > 0
+            buffered = eval(node.attributes.get("buffered", "False"))
+            cached = eval(node.attributes.get("cached", "False"))
+            defs = None
+            pagetag = None
+            if node.is_block and not node.is_anonymous:
+                args += ["**pageargs"]
+        else:
+            defs = self.write_toplevel()
+            pagetag = self.compiler.pagetag
+            name = "render_body"
+            if pagetag is not None:
+                args = pagetag.body_decl.get_argument_expressions()
+                if not pagetag.body_decl.kwargs:
+                    args += ["**pageargs"]
+                cached = eval(pagetag.attributes.get("cached", "False"))
+                self.compiler.enable_loop = self.compiler.enable_loop or eval(
+                    pagetag.attributes.get("enable_loop", "False")
+                )
+            else:
+                args = ["**pageargs"]
+                cached = False
+            buffered = filtered = False
+        if args is None:
+            args = ["context"]
+        else:
+            args = [a for a in ["context"] + args]
+
+        self.write_render_callable(
+            pagetag or node, name, args, buffered, filtered, cached
+        )
+
+        if defs is not None:
+            for node in defs:
+                _GenerateRenderMethod(printer, compiler, node)
+
+        if not self.in_def:
+            self.write_metadata_struct()
+
+    def write_metadata_struct(self):
+        self.printer.source_map[self.printer.lineno] = max(
+            self.printer.source_map
+        )
+        struct = {
+            "filename": self.compiler.filename,
+            "uri": self.compiler.uri,
+            "source_encoding": self.compiler.source_encoding,
+            "line_map": self.printer.source_map,
+        }
+        self.printer.writelines(
+            '"""',
+            "__M_BEGIN_METADATA",
+            json.dumps(struct),
+            "__M_END_METADATA\n" '"""',
+        )
+
+    @property
+    def identifiers(self):
+        return self.identifier_stack[-1]
+
+    def write_toplevel(self):
+        """Traverse a template structure for module-level directives and
+        generate the start of module-level code.
+
+        """
+        inherit = []
+        namespaces = {}
+        module_code = []
+
+        self.compiler.pagetag = None
+
+        class FindTopLevel:
+            def visitInheritTag(s, node):
+                inherit.append(node)
+
+            def visitNamespaceTag(s, node):
+                namespaces[node.name] = node
+
+            def visitPageTag(s, node):
+                self.compiler.pagetag = node
+
+            def visitCode(s, node):
+                if node.ismodule:
+                    module_code.append(node)
+
+        f = FindTopLevel()
+        for n in self.node.nodes:
+            n.accept_visitor(f)
+
+        self.compiler.namespaces = namespaces
+
+        module_ident = set()
+        for n in module_code:
+            module_ident = module_ident.union(n.declared_identifiers())
+
+        module_identifiers = _Identifiers(self.compiler)
+        module_identifiers.declared = module_ident
+
+        # module-level names, python code
+        if (
+            self.compiler.generate_magic_comment
+            and self.compiler.source_encoding
+        ):
+            self.printer.writeline(
+                "# -*- coding:%s -*-" % self.compiler.source_encoding
+            )
+
+        if self.compiler.future_imports:
+            self.printer.writeline(
+                "from __future__ import %s"
+                % (", ".join(self.compiler.future_imports),)
+            )
+        self.printer.writeline("from mako import runtime, filters, cache")
+        self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
+        self.printer.writeline("STOP_RENDERING = runtime.STOP_RENDERING")
+        self.printer.writeline("__M_dict_builtin = dict")
+        self.printer.writeline("__M_locals_builtin = locals")
+        self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
+        self.printer.writeline("_modified_time = %r" % time.time())
+        self.printer.writeline("_enable_loop = %r" % self.compiler.enable_loop)
+        self.printer.writeline(
+            "_template_filename = %r" % self.compiler.filename
+        )
+        self.printer.writeline("_template_uri = %r" % self.compiler.uri)
+        self.printer.writeline(
+            "_source_encoding = %r" % self.compiler.source_encoding
+        )
+        if self.compiler.imports:
+            buf = ""
+            for imp in self.compiler.imports:
+                buf += imp + "\n"
+                self.printer.writeline(imp)
+            impcode = ast.PythonCode(
+                buf,
+                source="",
+                lineno=0,
+                pos=0,
+                filename="template defined imports",
+            )
+        else:
+            impcode = None
+
+        main_identifiers = module_identifiers.branch(self.node)
+        mit = module_identifiers.topleveldefs
+        module_identifiers.topleveldefs = mit.union(
+            main_identifiers.topleveldefs
+        )
+        module_identifiers.declared.update(TOPLEVEL_DECLARED)
+        if impcode:
+            module_identifiers.declared.update(impcode.declared_identifiers)
+
+        self.compiler.identifiers = module_identifiers
+        self.printer.writeline(
+            "_exports = %r"
+            % [n.name for n in main_identifiers.topleveldefs.values()]
+        )
+        self.printer.write_blanks(2)
+
+        if len(module_code):
+            self.write_module_code(module_code)
+
+        if len(inherit):
+            self.write_namespaces(namespaces)
+            self.write_inherit(inherit[-1])
+        elif len(namespaces):
+            self.write_namespaces(namespaces)
+
+        return list(main_identifiers.topleveldefs.values())
+
+    def write_render_callable(
+        self, node, name, args, buffered, filtered, cached
+    ):
+        """write a top-level render callable.
+
+        this could be the main render() method or that of a top-level def."""
+
+        if self.in_def:
+            decorator = node.decorator
+            if decorator:
+                self.printer.writeline(
+                    "@runtime._decorate_toplevel(%s)" % decorator
+                )
+
+        self.printer.start_source(node.lineno)
+        self.printer.writelines(
+            "def %s(%s):" % (name, ",".join(args)),
+            # push new frame, assign current frame to __M_caller
+            "__M_caller = context.caller_stack._push_frame()",
+            "try:",
+        )
+        if buffered or filtered or cached:
+            self.printer.writeline("context._push_buffer()")
+
+        self.identifier_stack.append(
+            self.compiler.identifiers.branch(self.node)
+        )
+        if (not self.in_def or self.node.is_block) and "**pageargs" in args:
+            self.identifier_stack[-1].argument_declared.add("pageargs")
+
+        if not self.in_def and (
+            len(self.identifiers.locally_assigned) > 0
+            or len(self.identifiers.argument_declared) > 0
+        ):
+            self.printer.writeline(
+                "__M_locals = __M_dict_builtin(%s)"
+                % ",".join(
+                    [
+                        "%s=%s" % (x, x)
+                        for x in self.identifiers.argument_declared
+                    ]
+                )
+            )
+
+        self.write_variable_declares(self.identifiers, toplevel=True)
+
+        for n in self.node.nodes:
+            n.accept_visitor(self)
+
+        self.write_def_finish(self.node, buffered, filtered, cached)
+        self.printer.writeline(None)
+        self.printer.write_blanks(2)
+        if cached:
+            self.write_cache_decorator(
+                node, name, args, buffered, self.identifiers, toplevel=True
+            )
+
+    def write_module_code(self, module_code):
+        """write module-level template code, i.e. that which
+        is enclosed in <%! %> tags in the template."""
+        for n in module_code:
+            self.printer.write_indented_block(n.text, starting_lineno=n.lineno)
+
+    def write_inherit(self, node):
+        """write the module-level inheritance-determination callable."""
+
+        self.printer.writelines(
+            "def _mako_inherit(template, context):",
+            "_mako_generate_namespaces(context)",
+            "return runtime._inherit_from(context, %s, _template_uri)"
+            % (node.parsed_attributes["file"]),
+            None,
+        )
+
+    def write_namespaces(self, namespaces):
+        """write the module-level namespace-generating callable."""
+        self.printer.writelines(
+            "def _mako_get_namespace(context, name):",
+            "try:",
+            "return context.namespaces[(__name__, name)]",
+            "except KeyError:",
+            "_mako_generate_namespaces(context)",
+            "return context.namespaces[(__name__, name)]",
+            None,
+            None,
+        )
+        self.printer.writeline("def _mako_generate_namespaces(context):")
+
+        for node in namespaces.values():
+            if "import" in node.attributes:
+                self.compiler.has_ns_imports = True
+            self.printer.start_source(node.lineno)
+            if len(node.nodes):
+                self.printer.writeline("def make_namespace():")
+                export = []
+                identifiers = self.compiler.identifiers.branch(node)
+                self.in_def = True
+
+                class NSDefVisitor:
+                    def visitDefTag(s, node):
+                        s.visitDefOrBase(node)
+
+                    def visitBlockTag(s, node):
+                        s.visitDefOrBase(node)
+
+                    def visitDefOrBase(s, node):
+                        if node.is_anonymous:
+                            raise exceptions.CompileException(
+                                "Can't put anonymous blocks inside "
+                                "<%namespace>",
+                                **node.exception_kwargs,
+                            )
+                        self.write_inline_def(node, identifiers, nested=False)
+                        export.append(node.funcname)
+
+                vis = NSDefVisitor()
+                for n in node.nodes:
+                    n.accept_visitor(vis)
+                self.printer.writeline("return [%s]" % (",".join(export)))
+                self.printer.writeline(None)
+                self.in_def = False
+                callable_name = "make_namespace()"
+            else:
+                callable_name = "None"
+
+            if "file" in node.parsed_attributes:
+                self.printer.writeline(
+                    "ns = runtime.TemplateNamespace(%r,"
+                    " context._clean_inheritance_tokens(),"
+                    " templateuri=%s, callables=%s, "
+                    " calling_uri=_template_uri)"
+                    % (
+                        node.name,
+                        node.parsed_attributes.get("file", "None"),
+                        callable_name,
+                    )
+                )
+            elif "module" in node.parsed_attributes:
+                self.printer.writeline(
+                    "ns = runtime.ModuleNamespace(%r,"
+                    " context._clean_inheritance_tokens(),"
+                    " callables=%s, calling_uri=_template_uri,"
+                    " module=%s)"
+                    % (
+                        node.name,
+                        callable_name,
+                        node.parsed_attributes.get("module", "None"),
+                    )
+                )
+            else:
+                self.printer.writeline(
+                    "ns = runtime.Namespace(%r,"
+                    " context._clean_inheritance_tokens(),"
+                    " callables=%s, calling_uri=_template_uri)"
+                    % (node.name, callable_name)
+                )
+            if eval(node.attributes.get("inheritable", "False")):
+                self.printer.writeline("context['self'].%s = ns" % (node.name))
+
+            self.printer.writeline(
+                "context.namespaces[(__name__, %s)] = ns" % repr(node.name)
+            )
+            self.printer.write_blanks(1)
+        if not len(namespaces):
+            self.printer.writeline("pass")
+        self.printer.writeline(None)
+
+    def write_variable_declares(self, identifiers, toplevel=False, limit=None):
+        """write variable declarations at the top of a function.
+
+        the variable declarations are in the form of callable
+        definitions for defs and/or name lookup within the
+        function's context argument. the names declared are based
+        on the names that are referenced in the function body,
+        which don't otherwise have any explicit assignment
+        operation. names that are assigned within the body are
+        assumed to be locally-scoped variables and are not
+        separately declared.
+
+        for def callable definitions, if the def is a top-level
+        callable then a 'stub' callable is generated which wraps
+        the current Context into a closure. if the def is not
+        top-level, it is fully rendered as a local closure.
+
+        """
+
+        # collection of all defs available to us in this scope
+        comp_idents = {c.funcname: c for c in identifiers.defs}
+        to_write = set()
+
+        # write "context.get()" for all variables we are going to
+        # need that arent in the namespace yet
+        to_write = to_write.union(identifiers.undeclared)
+
+        # write closure functions for closures that we define
+        # right here
+        to_write = to_write.union(
+            [c.funcname for c in identifiers.closuredefs.values()]
+        )
+
+        # remove identifiers that are declared in the argument
+        # signature of the callable
+        to_write = to_write.difference(identifiers.argument_declared)
+
+        # remove identifiers that we are going to assign to.
+        # in this way we mimic Python's behavior,
+        # i.e. assignment to a variable within a block
+        # means that variable is now a "locally declared" var,
+        # which cannot be referenced beforehand.
+        to_write = to_write.difference(identifiers.locally_declared)
+
+        if self.compiler.enable_loop:
+            has_loop = "loop" in to_write
+            to_write.discard("loop")
+        else:
+            has_loop = False
+
+        # if a limiting set was sent, constrain to the items in that set
+        # (this is used for the caching decorator)
+        if limit is not None:
+            to_write = to_write.intersection(limit)
+
+        if toplevel and getattr(self.compiler, "has_ns_imports", False):
+            self.printer.writeline("_import_ns = {}")
+            self.compiler.has_imports = True
+            for ident, ns in self.compiler.namespaces.items():
+                if "import" in ns.attributes:
+                    self.printer.writeline(
+                        "_mako_get_namespace(context, %r)."
+                        "_populate(_import_ns, %r)"
+                        % (
+                            ident,
+                            re.split(r"\s*,\s*", ns.attributes["import"]),
+                        )
+                    )
+
+        if has_loop:
+            self.printer.writeline("loop = __M_loop = runtime.LoopStack()")
+
+        for ident in to_write:
+            if ident in comp_idents:
+                comp = comp_idents[ident]
+                if comp.is_block:
+                    if not comp.is_anonymous:
+                        self.write_def_decl(comp, identifiers)
+                    else:
+                        self.write_inline_def(comp, identifiers, nested=True)
+                else:
+                    if comp.is_root():
+                        self.write_def_decl(comp, identifiers)
+                    else:
+                        self.write_inline_def(comp, identifiers, nested=True)
+
+            elif ident in self.compiler.namespaces:
+                self.printer.writeline(
+                    "%s = _mako_get_namespace(context, %r)" % (ident, ident)
+                )
+            else:
+                if getattr(self.compiler, "has_ns_imports", False):
+                    if self.compiler.strict_undefined:
+                        self.printer.writelines(
+                            "%s = _import_ns.get(%r, UNDEFINED)"
+                            % (ident, ident),
+                            "if %s is UNDEFINED:" % ident,
+                            "try:",
+                            "%s = context[%r]" % (ident, ident),
+                            "except KeyError:",
+                            "raise NameError(\"'%s' is not defined\")" % ident,
+                            None,
+                            None,
+                        )
+                    else:
+                        self.printer.writeline(
+                            "%s = _import_ns.get"
+                            "(%r, context.get(%r, UNDEFINED))"
+                            % (ident, ident, ident)
+                        )
+                else:
+                    if self.compiler.strict_undefined:
+                        self.printer.writelines(
+                            "try:",
+                            "%s = context[%r]" % (ident, ident),
+                            "except KeyError:",
+                            "raise NameError(\"'%s' is not defined\")" % ident,
+                            None,
+                        )
+                    else:
+                        self.printer.writeline(
+                            "%s = context.get(%r, UNDEFINED)" % (ident, ident)
+                        )
+
+        self.printer.writeline("__M_writer = context.writer()")
+
+    def write_def_decl(self, node, identifiers):
+        """write a locally-available callable referencing a top-level def"""
+        funcname = node.funcname
+        namedecls = node.get_argument_expressions()
+        nameargs = node.get_argument_expressions(as_call=True)
+
+        if not self.in_def and (
+            len(self.identifiers.locally_assigned) > 0
+            or len(self.identifiers.argument_declared) > 0
+        ):
+            nameargs.insert(0, "context._locals(__M_locals)")
+        else:
+            nameargs.insert(0, "context")
+        self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
+        self.printer.writeline(
+            "return render_%s(%s)" % (funcname, ",".join(nameargs))
+        )
+        self.printer.writeline(None)
+
+    def write_inline_def(self, node, identifiers, nested):
+        """write a locally-available def callable inside an enclosing def."""
+
+        namedecls = node.get_argument_expressions()
+
+        decorator = node.decorator
+        if decorator:
+            self.printer.writeline(
+                "@runtime._decorate_inline(context, %s)" % decorator
+            )
+        self.printer.writeline(
+            "def %s(%s):" % (node.funcname, ",".join(namedecls))
+        )
+        filtered = len(node.filter_args.args) > 0
+        buffered = eval(node.attributes.get("buffered", "False"))
+        cached = eval(node.attributes.get("cached", "False"))
+        self.printer.writelines(
+            # push new frame, assign current frame to __M_caller
+            "__M_caller = context.caller_stack._push_frame()",
+            "try:",
+        )
+        if buffered or filtered or cached:
+            self.printer.writelines("context._push_buffer()")
+
+        identifiers = identifiers.branch(node, nested=nested)
+
+        self.write_variable_declares(identifiers)
+
+        self.identifier_stack.append(identifiers)
+        for n in node.nodes:
+            n.accept_visitor(self)
+        self.identifier_stack.pop()
+
+        self.write_def_finish(node, buffered, filtered, cached)
+        self.printer.writeline(None)
+        if cached:
+            self.write_cache_decorator(
+                node,
+                node.funcname,
+                namedecls,
+                False,
+                identifiers,
+                inline=True,
+                toplevel=False,
+            )
+
+    def write_def_finish(
+        self, node, buffered, filtered, cached, callstack=True
+    ):
+        """write the end section of a rendering function, either outermost or
+        inline.
+
+        this takes into account whether the rendering function was filtered,
+        buffered, etc., and closes the corresponding try: block if any, then
+        writes code to retrieve captured content, apply filters, and send the
+        proper return value."""
+
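+        # Illustrative sketch (not part of upstream Mako): for a def that is
+        # neither buffered, filtered nor cached, the epilogue emitted below
+        # is simply:
+        #     return ''
+        # finally:
+        #     context.caller_stack._pop_frame()
+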
+        if not buffered and not cached and not filtered:
+            self.printer.writeline("return ''")
+            if callstack:
+                self.printer.writelines(
+                    "finally:", "context.caller_stack._pop_frame()", None
+                )
+
+        if buffered or filtered or cached:
+            if buffered or cached:
+                # in a caching scenario, don't try to get a writer
+                # from the context after popping; assume the caching
+                # implementation might be using a context with no
+                # extra buffers
+                self.printer.writelines(
+                    "finally:", "__M_buf = context._pop_buffer()"
+                )
+            else:
+                self.printer.writelines(
+                    "finally:",
+                    "__M_buf, __M_writer = context._pop_buffer_and_writer()",
+                )
+
+            if callstack:
+                self.printer.writeline("context.caller_stack._pop_frame()")
+
+            s = "__M_buf.getvalue()"
+            if filtered:
+                s = self.create_filter_callable(
+                    node.filter_args.args, s, False
+                )
+            self.printer.writeline(None)
+            if buffered and not cached:
+                s = self.create_filter_callable(
+                    self.compiler.buffer_filters, s, False
+                )
+            if buffered or cached:
+                self.printer.writeline("return %s" % s)
+            else:
+                self.printer.writelines("__M_writer(%s)" % s, "return ''")
+
+    def write_cache_decorator(
+        self,
+        node_or_pagetag,
+        name,
+        args,
+        buffered,
+        identifiers,
+        inline=False,
+        toplevel=False,
+    ):
+        """write a post-function decorator to replace a rendering
+        callable with a cached version of itself."""
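+        # Illustrative sketch (not part of upstream Mako): for a cached,
+        # non-buffered def "body()" with no extra cache arguments, the code
+        # emitted below is roughly:
+        #     __M_body = body
+        #     def body():
+        #         __M_writer(context.get('local').cache._ctx_get_or_create(
+        #             'body', lambda:__M_body(), context, __M_defname='body'))
+        #         return ''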
+
+        self.printer.writeline("__M_%s = %s" % (name, name))
+        cachekey = node_or_pagetag.parsed_attributes.get(
+            "cache_key", repr(name)
+        )
+
+        cache_args = {}
+        if self.compiler.pagetag is not None:
+            cache_args.update(
+                (pa[6:], self.compiler.pagetag.parsed_attributes[pa])
+                for pa in self.compiler.pagetag.parsed_attributes
+                if pa.startswith("cache_") and pa != "cache_key"
+            )
+        cache_args.update(
+            (pa[6:], node_or_pagetag.parsed_attributes[pa])
+            for pa in node_or_pagetag.parsed_attributes
+            if pa.startswith("cache_") and pa != "cache_key"
+        )
+        if "timeout" in cache_args:
+            cache_args["timeout"] = int(eval(cache_args["timeout"]))
+
+        self.printer.writeline("def %s(%s):" % (name, ",".join(args)))
+
+        # form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
+        pass_args = [
+            "%s=%s" % ((a.split("=")[0],) * 2) if "=" in a else a for a in args
+        ]
+
+        self.write_variable_declares(
+            identifiers,
+            toplevel=toplevel,
+            limit=node_or_pagetag.undeclared_identifiers(),
+        )
+        if buffered:
+            s = (
+                "context.get('local')."
+                "cache._ctx_get_or_create("
+                "%s, lambda:__M_%s(%s),  context, %s__M_defname=%r)"
+                % (
+                    cachekey,
+                    name,
+                    ",".join(pass_args),
+                    "".join(
+                        ["%s=%s, " % (k, v) for k, v in cache_args.items()]
+                    ),
+                    name,
+                )
+            )
+            # apply buffer_filters
+            s = self.create_filter_callable(
+                self.compiler.buffer_filters, s, False
+            )
+            self.printer.writelines("return " + s, None)
+        else:
+            self.printer.writelines(
+                "__M_writer(context.get('local')."
+                "cache._ctx_get_or_create("
+                "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))"
+                % (
+                    cachekey,
+                    name,
+                    ",".join(pass_args),
+                    "".join(
+                        ["%s=%s, " % (k, v) for k, v in cache_args.items()]
+                    ),
+                    name,
+                ),
+                "return ''",
+                None,
+            )
+
+    def create_filter_callable(self, args, target, is_expression):
+        """write a filter-applying expression based on the filters
+        present in the given filter names, adjusting for the global
+        'default' filter aliases as needed."""
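+        # Illustrative note (not part of upstream Mako): filters wrap the
+        # target innermost-first, so args ['x', 'h'] applied to target "expr"
+        # produce an expression of the shape h'(x'(expr)), where x' and h'
+        # are whatever callables those aliases resolve to through
+        # filters.DEFAULT_ESCAPES.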
+
+        def locate_encode(name):
+            if re.match(r"decode\..+", name):
+                return "filters." + name
+            else:
+                return filters.DEFAULT_ESCAPES.get(name, name)
+
+        if "n" not in args:
+            if is_expression:
+                if self.compiler.pagetag:
+                    args = self.compiler.pagetag.filter_args.args + args
+                if self.compiler.default_filters and "n" not in args:
+                    args = self.compiler.default_filters + args
+        for e in args:
+            # if filter given as a function, get just the identifier portion
+            if e == "n":
+                continue
+            m = re.match(r"(.+?)(\(.*\))", e)
+            if m:
+                ident, fargs = m.group(1, 2)
+                f = locate_encode(ident)
+                e = f + fargs
+            else:
+                e = locate_encode(e)
+                assert e is not None
+            target = "%s(%s)" % (e, target)
+        return target
+
+    def visitExpression(self, node):
+        self.printer.start_source(node.lineno)
+        if (
+            len(node.escapes)
+            or (
+                self.compiler.pagetag is not None
+                and len(self.compiler.pagetag.filter_args.args)
+            )
+            or len(self.compiler.default_filters)
+        ):
+            s = self.create_filter_callable(
+                node.escapes_code.args, "%s" % node.text, True
+            )
+            self.printer.writeline("__M_writer(%s)" % s)
+        else:
+            self.printer.writeline("__M_writer(%s)" % node.text)
+
+    def visitControlLine(self, node):
+        if node.isend:
+            self.printer.writeline(None)
+            if node.has_loop_context:
+                self.printer.writeline("finally:")
+                self.printer.writeline("loop = __M_loop._exit()")
+                self.printer.writeline(None)
+        else:
+            self.printer.start_source(node.lineno)
+            if self.compiler.enable_loop and node.keyword == "for":
+                text = mangle_mako_loop(node, self.printer)
+            else:
+                text = node.text
+            self.printer.writeline(text)
+            children = node.get_children()
+            # this covers the three situations where we want to insert a pass:
+            #    1) a ternary control line with no children,
+            #    2) a primary control line with nothing but its own ternary
+            #          and end control lines, and
+            #    3) any control line with no content other than comments
+            if not children or (
+                all(
+                    isinstance(c, (parsetree.Comment, parsetree.ControlLine))
+                    for c in children
+                )
+                and all(
+                    (node.is_ternary(c.keyword) or c.isend)
+                    for c in children
+                    if isinstance(c, parsetree.ControlLine)
+                )
+            ):
+                self.printer.writeline("pass")
+
+    def visitText(self, node):
+        self.printer.start_source(node.lineno)
+        self.printer.writeline("__M_writer(%s)" % repr(node.content))
+
+    def visitTextTag(self, node):
+        filtered = len(node.filter_args.args) > 0
+        if filtered:
+            self.printer.writelines(
+                "__M_writer = context._push_writer()", "try:"
+            )
+        for n in node.nodes:
+            n.accept_visitor(self)
+        if filtered:
+            self.printer.writelines(
+                "finally:",
+                "__M_buf, __M_writer = context._pop_buffer_and_writer()",
+                "__M_writer(%s)"
+                % self.create_filter_callable(
+                    node.filter_args.args, "__M_buf.getvalue()", False
+                ),
+                None,
+            )
+
+    def visitCode(self, node):
+        if not node.ismodule:
+            self.printer.write_indented_block(
+                node.text, starting_lineno=node.lineno
+            )
+
+            if not self.in_def and len(self.identifiers.locally_assigned) > 0:
+                # if we are the "template" def, fudge locally
+                # declared/modified variables into the "__M_locals" dictionary,
+                # which is used for def calls within the same template,
+                # to simulate "enclosing scope"
+                self.printer.writeline(
+                    "__M_locals_builtin_stored = __M_locals_builtin()"
+                )
+                self.printer.writeline(
+                    "__M_locals.update(__M_dict_builtin([(__M_key,"
+                    " __M_locals_builtin_stored[__M_key]) for __M_key in"
+                    " [%s] if __M_key in __M_locals_builtin_stored]))"
+                    % ",".join([repr(x) for x in node.declared_identifiers()])
+                )
+
+    def visitIncludeTag(self, node):
+        self.printer.start_source(node.lineno)
+        args = node.attributes.get("args")
+        if args:
+            self.printer.writeline(
+                "runtime._include_file(context, %s, _template_uri, %s)"
+                % (node.parsed_attributes["file"], args)
+            )
+        else:
+            self.printer.writeline(
+                "runtime._include_file(context, %s, _template_uri)"
+                % (node.parsed_attributes["file"])
+            )
+
+    def visitNamespaceTag(self, node):
+        pass
+
+    def visitDefTag(self, node):
+        pass
+
+    def visitBlockTag(self, node):
+        if node.is_anonymous:
+            self.printer.writeline("%s()" % node.funcname)
+        else:
+            nameargs = node.get_argument_expressions(as_call=True)
+            nameargs += ["**pageargs"]
+            self.printer.writeline(
+                "if 'parent' not in context._data or "
+                "not hasattr(context._data['parent'], '%s'):" % node.funcname
+            )
+            self.printer.writeline(
+                "context['self'].%s(%s)" % (node.funcname, ",".join(nameargs))
+            )
+            self.printer.writeline("\n")
+
+    def visitCallNamespaceTag(self, node):
+        # TODO: we can put namespace-specific checks here, such
+        # as ensure the given namespace will be imported,
+        # pre-import the namespace, etc.
+        self.visitCallTag(node)
+
+    def visitCallTag(self, node):
+        self.printer.writeline("def ccall(caller):")
+        export = ["body"]
+        callable_identifiers = self.identifiers.branch(node, nested=True)
+        body_identifiers = callable_identifiers.branch(node, nested=False)
+        # we want the 'caller' passed to ccall to be used
+        # for the body() function, but for other non-body()
+        # <%def>s within <%call> we want the current caller
+        # off the call stack (if any)
+        body_identifiers.add_declared("caller")
+
+        self.identifier_stack.append(body_identifiers)
+
+        class DefVisitor:
+            def visitDefTag(s, node):
+                s.visitDefOrBase(node)
+
+            def visitBlockTag(s, node):
+                s.visitDefOrBase(node)
+
+            def visitDefOrBase(s, node):
+                self.write_inline_def(node, callable_identifiers, nested=False)
+                if not node.is_anonymous:
+                    export.append(node.funcname)
+                # remove defs that are within the <%call> from the
+                # "closuredefs" defined in the body, so they dont render twice
+                if node.funcname in body_identifiers.closuredefs:
+                    del body_identifiers.closuredefs[node.funcname]
+
+        vis = DefVisitor()
+        for n in node.nodes:
+            n.accept_visitor(vis)
+        self.identifier_stack.pop()
+
+        bodyargs = node.body_decl.get_argument_expressions()
+        self.printer.writeline("def body(%s):" % ",".join(bodyargs))
+
+        # TODO: figure out best way to specify
+        # buffering/nonbuffering (at call time would be better)
+        buffered = False
+        if buffered:
+            self.printer.writelines("context._push_buffer()", "try:")
+        self.write_variable_declares(body_identifiers)
+        self.identifier_stack.append(body_identifiers)
+
+        for n in node.nodes:
+            n.accept_visitor(self)
+        self.identifier_stack.pop()
+
+        self.write_def_finish(node, buffered, False, False, callstack=False)
+        self.printer.writelines(None, "return [%s]" % (",".join(export)), None)
+
+        self.printer.writelines(
+            # push on caller for nested call
+            "context.caller_stack.nextcaller = "
+            "runtime.Namespace('caller', context, "
+            "callables=ccall(__M_caller))",
+            "try:",
+        )
+        self.printer.start_source(node.lineno)
+        self.printer.writelines(
+            "__M_writer(%s)"
+            % self.create_filter_callable([], node.expression, True),
+            "finally:",
+            "context.caller_stack.nextcaller = None",
+            None,
+        )
+
+
+class _Identifiers:
+
+    """tracks the status of identifier names as template code is rendered."""
+
+    def __init__(self, compiler, node=None, parent=None, nested=False):
+        if parent is not None:
+            # if we are the branch created in write_namespaces(),
+            # we don't share any context from the main body().
+            if isinstance(node, parsetree.NamespaceTag):
+                self.declared = set()
+                self.topleveldefs = util.SetLikeDict()
+            else:
+                # things that have already been declared
+                # in an enclosing namespace (i.e. names we can just use)
+                self.declared = (
+                    set(parent.declared)
+                    .union([c.name for c in parent.closuredefs.values()])
+                    .union(parent.locally_declared)
+                    .union(parent.argument_declared)
+                )
+
+                # if these identifiers correspond to a "nested"
+                # scope, it means whatever the parent identifiers
+                # had as undeclared will have been declared by that parent,
+                # and therefore we have them in our scope.
+                if nested:
+                    self.declared = self.declared.union(parent.undeclared)
+
+                # top level defs that are available
+                self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
+        else:
+            self.declared = set()
+            self.topleveldefs = util.SetLikeDict()
+
+        self.compiler = compiler
+
+        # things within this level that are referenced before they
+        # are declared (e.g. assigned to)
+        self.undeclared = set()
+
+        # things that are declared locally.  some of these things
+        # could be in the "undeclared" list as well if they are
+        # referenced before declared
+        self.locally_declared = set()
+
+        # assignments made in explicit python blocks.
+        # these will be propagated to
+        # the context of local def calls.
+        self.locally_assigned = set()
+
+        # things that are declared in the argument
+        # signature of the def callable
+        self.argument_declared = set()
+
+        # closure defs that are defined in this level
+        self.closuredefs = util.SetLikeDict()
+
+        self.node = node
+
+        if node is not None:
+            node.accept_visitor(self)
+
+        illegal_names = self.compiler.reserved_names.intersection(
+            self.locally_declared
+        )
+        if illegal_names:
+            raise exceptions.NameConflictError(
+                "Reserved words declared in template: %s"
+                % ", ".join(illegal_names)
+            )
+
+    def branch(self, node, **kwargs):
+        """create a new Identifiers for a new Node, with
+        this Identifiers as the parent."""
+
+        return _Identifiers(self.compiler, node, self, **kwargs)
+
+    @property
+    def defs(self):
+        return set(self.topleveldefs.union(self.closuredefs).values())
+
+    def __repr__(self):
+        return (
+            "Identifiers(declared=%r, locally_declared=%r, "
+            "undeclared=%r, topleveldefs=%r, closuredefs=%r, "
+            "argumentdeclared=%r)"
+            % (
+                list(self.declared),
+                list(self.locally_declared),
+                list(self.undeclared),
+                [c.name for c in self.topleveldefs.values()],
+                [c.name for c in self.closuredefs.values()],
+                self.argument_declared,
+            )
+        )
+
+    def check_declared(self, node):
+        """update the state of this Identifiers with the undeclared
+        and declared identifiers of the given node."""
+
+        for ident in node.undeclared_identifiers():
+            if ident != "context" and ident not in self.declared.union(
+                self.locally_declared
+            ):
+                self.undeclared.add(ident)
+        for ident in node.declared_identifiers():
+            self.locally_declared.add(ident)
+
+    def add_declared(self, ident):
+        self.declared.add(ident)
+        if ident in self.undeclared:
+            self.undeclared.remove(ident)
+
+    def visitExpression(self, node):
+        self.check_declared(node)
+
+    def visitControlLine(self, node):
+        self.check_declared(node)
+
+    def visitCode(self, node):
+        if not node.ismodule:
+            self.check_declared(node)
+            self.locally_assigned = self.locally_assigned.union(
+                node.declared_identifiers()
+            )
+
+    def visitNamespaceTag(self, node):
+        # only traverse into the sub-elements of a
+        # <%namespace> tag if we are the branch created in
+        # write_namespaces()
+        if self.node is node:
+            for n in node.nodes:
+                n.accept_visitor(self)
+
+    def _check_name_exists(self, collection, node):
+        existing = collection.get(node.funcname)
+        collection[node.funcname] = node
+        if (
+            existing is not None
+            and existing is not node
+            and (node.is_block or existing.is_block)
+        ):
+            raise exceptions.CompileException(
+                "%%def or %%block named '%s' already "
+                "exists in this template." % node.funcname,
+                **node.exception_kwargs,
+            )
+
+    def visitDefTag(self, node):
+        if node.is_root() and not node.is_anonymous:
+            self._check_name_exists(self.topleveldefs, node)
+        elif node is not self.node:
+            self._check_name_exists(self.closuredefs, node)
+
+        for ident in node.undeclared_identifiers():
+            if ident != "context" and ident not in self.declared.union(
+                self.locally_declared
+            ):
+                self.undeclared.add(ident)
+
+        # visit defs only one level deep
+        if node is self.node:
+            for ident in node.declared_identifiers():
+                self.argument_declared.add(ident)
+
+            for n in node.nodes:
+                n.accept_visitor(self)
+
+    def visitBlockTag(self, node):
+        if node is not self.node and not node.is_anonymous:
+            if isinstance(self.node, parsetree.DefTag):
+                raise exceptions.CompileException(
+                    "Named block '%s' not allowed inside of def '%s'"
+                    % (node.name, self.node.name),
+                    **node.exception_kwargs,
+                )
+            elif isinstance(
+                self.node, (parsetree.CallTag, parsetree.CallNamespaceTag)
+            ):
+                raise exceptions.CompileException(
+                    "Named block '%s' not allowed inside of <%%call> tag"
+                    % (node.name,),
+                    **node.exception_kwargs,
+                )
+
+        for ident in node.undeclared_identifiers():
+            if ident != "context" and ident not in self.declared.union(
+                self.locally_declared
+            ):
+                self.undeclared.add(ident)
+
+        if not node.is_anonymous:
+            self._check_name_exists(self.topleveldefs, node)
+            self.undeclared.add(node.funcname)
+        elif node is not self.node:
+            self._check_name_exists(self.closuredefs, node)
+        for ident in node.declared_identifiers():
+            self.argument_declared.add(ident)
+        for n in node.nodes:
+            n.accept_visitor(self)
+
+    def visitTextTag(self, node):
+        for ident in node.undeclared_identifiers():
+            if ident != "context" and ident not in self.declared.union(
+                self.locally_declared
+            ):
+                self.undeclared.add(ident)
+
+    def visitIncludeTag(self, node):
+        self.check_declared(node)
+
+    def visitPageTag(self, node):
+        for ident in node.declared_identifiers():
+            self.argument_declared.add(ident)
+        self.check_declared(node)
+
+    def visitCallNamespaceTag(self, node):
+        self.visitCallTag(node)
+
+    def visitCallTag(self, node):
+        if node is self.node:
+            for ident in node.undeclared_identifiers():
+                if ident != "context" and ident not in self.declared.union(
+                    self.locally_declared
+                ):
+                    self.undeclared.add(ident)
+            for ident in node.declared_identifiers():
+                self.argument_declared.add(ident)
+            for n in node.nodes:
+                n.accept_visitor(self)
+        else:
+            for ident in node.undeclared_identifiers():
+                if ident != "context" and ident not in self.declared.union(
+                    self.locally_declared
+                ):
+                    self.undeclared.add(ident)
+
+
+_FOR_LOOP = re.compile(
+    r"^for\s+((?:\(?)\s*"
+    r"(?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*"
+    r"(?:\s*,\s*(?:[A-Za-z_][A-Za-z_0-9]*),??)*\s*(?:\)?)"
+    r"(?:\s*,\s*(?:"
+    r"(?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*"
+    r"(?:\s*,\s*(?:[A-Za-z_][A-Za-z_0-9]*),??)*\s*(?:\)?)"
+    r"),??)*\s*(?:\)?))\s+in\s+(.*):"
+)
+
+
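+# Illustrative note (not part of upstream Mako): for a control line such as
+#     for (k, v) in d.items():
+# _FOR_LOOP captures group(1) == "(k, v)" and group(2) == "d.items()", which
+# mangle_mako_loop below turns into roughly:
+#     loop = __M_loop._enter(d.items())
+#     try:
+#         for (k, v) in loop: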
+def mangle_mako_loop(node, printer):
+    """converts a for loop into a context manager wrapped around a for loop
+    when access to the `loop` variable has been detected in the for loop body
+    """
+    loop_variable = LoopVariable()
+    node.accept_visitor(loop_variable)
+    if loop_variable.detected:
+        node.nodes[-1].has_loop_context = True
+        match = _FOR_LOOP.match(node.text)
+        if match:
+            printer.writelines(
+                "loop = __M_loop._enter(%s)" % match.group(2),
+                "try:"
+                # 'with __M_loop(%s) as loop:' % match.group(2)
+            )
+            text = "for %s in loop:" % match.group(1)
+        else:
+            raise SyntaxError("Couldn't apply loop context: %s" % node.text)
+    else:
+        text = node.text
+    return text
+
+
+class LoopVariable:
+
+    """A node visitor which looks for the name 'loop' within undeclared
+    identifiers."""
+
+    def __init__(self):
+        self.detected = False
+
+    def _loop_reference_detected(self, node):
+        if "loop" in node.undeclared_identifiers():
+            self.detected = True
+        else:
+            for n in node.get_children():
+                n.accept_visitor(self)
+
+    def visitControlLine(self, node):
+        self._loop_reference_detected(node)
+
+    def visitCode(self, node):
+        self._loop_reference_detected(node)
+
+    def visitExpression(self, node):
+        self._loop_reference_detected(node)
diff --git a/venv/Lib/site-packages/mako/compat.py b/venv/Lib/site-packages/mako/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..4de11c5903fd21b86067d2f8b27f6184d97070c5
--- /dev/null
+++ b/venv/Lib/site-packages/mako/compat.py
@@ -0,0 +1,70 @@
+# mako/compat.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import collections
+from importlib import metadata as importlib_metadata
+from importlib import util
+import inspect
+import sys
+
+win32 = sys.platform.startswith("win")
+pypy = hasattr(sys, "pypy_version_info")
+
+ArgSpec = collections.namedtuple(
+    "ArgSpec", ["args", "varargs", "keywords", "defaults"]
+)
+
+
+def inspect_getargspec(func):
+    """getargspec based on fully vendored getfullargspec from Python 3.3."""
+
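+    # Illustrative example (not part of upstream Mako): for
+    #     def f(a, b=1, *args, **kw): ...
+    # this returns
+    #     ArgSpec(args=['a', 'b'], varargs='args', keywords='kw', defaults=(1,))
+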
+    if inspect.ismethod(func):
+        func = func.__func__
+    if not inspect.isfunction(func):
+        raise TypeError(f"{func!r} is not a Python function")
+
+    co = func.__code__
+    if not inspect.iscode(co):
+        raise TypeError(f"{co!r} is not a code object")
+
+    nargs = co.co_argcount
+    names = co.co_varnames
+    nkwargs = co.co_kwonlyargcount
+    args = list(names[:nargs])
+
+    nargs += nkwargs
+    varargs = None
+    if co.co_flags & inspect.CO_VARARGS:
+        varargs = co.co_varnames[nargs]
+        nargs = nargs + 1
+    varkw = None
+    if co.co_flags & inspect.CO_VARKEYWORDS:
+        varkw = co.co_varnames[nargs]
+
+    return ArgSpec(args, varargs, varkw, func.__defaults__)
+
+
+def load_module(module_id, path):
+    spec = util.spec_from_file_location(module_id, path)
+    module = util.module_from_spec(spec)
+    spec.loader.exec_module(module)
+    return module
+
+
+def exception_as():
+    return sys.exc_info()[1]
+
+
+def exception_name(exc):
+    return exc.__class__.__name__
+
+
+def importlib_metadata_get(group):
+    ep = importlib_metadata.entry_points()
+    if hasattr(ep, "select"):
+        return ep.select(group=group)
+    else:
+        return ep.get(group, ())
diff --git a/venv/Lib/site-packages/mako/exceptions.py b/venv/Lib/site-packages/mako/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bf6a6018b6016c8da13c9f191ab77a67f023a7a
--- /dev/null
+++ b/venv/Lib/site-packages/mako/exceptions.py
@@ -0,0 +1,417 @@
+# mako/exceptions.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""exception classes"""
+
+import sys
+import traceback
+
+from mako import compat
+from mako import util
+
+
+class MakoException(Exception):
+    pass
+
+
+class RuntimeException(MakoException):
+    pass
+
+
+def _format_filepos(lineno, pos, filename):
+    if filename is None:
+        return " at line: %d char: %d" % (lineno, pos)
+    else:
+        return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
+
+
+class CompileException(MakoException):
+    def __init__(self, message, source, lineno, pos, filename):
+        MakoException.__init__(
+            self, message + _format_filepos(lineno, pos, filename)
+        )
+        self.lineno = lineno
+        self.pos = pos
+        self.filename = filename
+        self.source = source
+
+
+class SyntaxException(MakoException):
+    def __init__(self, message, source, lineno, pos, filename):
+        MakoException.__init__(
+            self, message + _format_filepos(lineno, pos, filename)
+        )
+        self.lineno = lineno
+        self.pos = pos
+        self.filename = filename
+        self.source = source
+
+
+class UnsupportedError(MakoException):
+
+    """raised when a retired feature is used."""
+
+
+class NameConflictError(MakoException):
+
+    """raised when a reserved word is used inappropriately"""
+
+
+class TemplateLookupException(MakoException):
+    pass
+
+
+class TopLevelLookupException(TemplateLookupException):
+    pass
+
+
+class RichTraceback:
+
+    """Pull the current exception from the ``sys`` traceback and extracts
+    Mako-specific template information.
+
+    See the usage examples in :ref:`handling_exceptions`.
+
+    """
+
+    def __init__(self, error=None, traceback=None):
+        self.source, self.lineno = "", 0
+
+        if error is None or traceback is None:
+            t, value, tback = sys.exc_info()
+
+        if error is None:
+            error = value or t
+
+        if traceback is None:
+            traceback = tback
+
+        self.error = error
+        self.records = self._init(traceback)
+
+        if isinstance(self.error, (CompileException, SyntaxException)):
+            self.source = self.error.source
+            self.lineno = self.error.lineno
+            self._has_source = True
+
+        self._init_message()
+
+    @property
+    def errorname(self):
+        return compat.exception_name(self.error)
+
+    def _init_message(self):
+        """Find a unicode representation of self.error"""
+        try:
+            self.message = str(self.error)
+        except UnicodeError:
+            try:
+                self.message = str(self.error)
+            except UnicodeEncodeError:
+                # Fall back to args as neither unicode nor
+                # str(Exception(u'\xe6')) work in Python < 2.6
+                self.message = self.error.args[0]
+        if not isinstance(self.message, str):
+            self.message = str(self.message, "ascii", "replace")
+
+    def _get_reformatted_records(self, records):
+        for rec in records:
+            if rec[6] is not None:
+                yield (rec[4], rec[5], rec[2], rec[6])
+            else:
+                yield tuple(rec[0:4])
+
+    @property
+    def traceback(self):
+        """Return a list of 4-tuple traceback records (i.e. normal python
+        format) with template-corresponding lines remapped to the originating
+        template.
+
+        """
+        return list(self._get_reformatted_records(self.records))
+
+    @property
+    def reverse_records(self):
+        return reversed(self.records)
+
+    @property
+    def reverse_traceback(self):
+        """Return the same data as traceback, except in reverse order."""
+
+        return list(self._get_reformatted_records(self.reverse_records))
+
+    def _init(self, trcback):
+        """format a traceback from sys.exc_info() into 7-item tuples,
+        containing the regular four traceback tuple items, plus the original
+        template filename, the line number adjusted relative to the template
+        source, and the code line from that line number of the template."""
+
+        import mako.template
+
+        mods = {}
+        rawrecords = traceback.extract_tb(trcback)
+        new_trcback = []
+        for filename, lineno, function, line in rawrecords:
+            if not line:
+                line = ""
+            try:
+                (line_map, template_lines, template_filename) = mods[filename]
+            except KeyError:
+                try:
+                    info = mako.template._get_module_info(filename)
+                    module_source = info.code
+                    template_source = info.source
+                    template_filename = (
+                        info.template_filename or info.template_uri or filename
+                    )
+                except KeyError:
+                    # A normal .py file (not a Template)
+                    new_trcback.append(
+                        (
+                            filename,
+                            lineno,
+                            function,
+                            line,
+                            None,
+                            None,
+                            None,
+                            None,
+                        )
+                    )
+                    continue
+
+                template_ln = 1
+
+                mtm = mako.template.ModuleInfo
+                source_map = mtm.get_module_source_metadata(
+                    module_source, full_line_map=True
+                )
+                line_map = source_map["full_line_map"]
+
+                template_lines = template_source.split("\n")
+                mods[filename] = (line_map, template_lines, template_filename)
+
+            template_ln = line_map[lineno - 1]
+
+            if template_ln <= len(template_lines):
+                template_line = template_lines[template_ln - 1]
+            else:
+                template_line = None
+            new_trcback.append(
+                (
+                    filename,
+                    lineno,
+                    function,
+                    line,
+                    template_filename,
+                    template_ln,
+                    template_line,
+                    template_source,
+                )
+            )
+        if not self.source:
+            for l in range(len(new_trcback) - 1, 0, -1):
+                if new_trcback[l][5]:
+                    self.source = new_trcback[l][7]
+                    self.lineno = new_trcback[l][5]
+                    break
+            else:
+                if new_trcback:
+                    try:
+                        # A normal .py file (not a Template)
+                        with open(new_trcback[-1][0], "rb") as fp:
+                            encoding = util.parse_encoding(fp)
+                            if not encoding:
+                                encoding = "utf-8"
+                            fp.seek(0)
+                            self.source = fp.read()
+                        if encoding:
+                            self.source = self.source.decode(encoding)
+                    except IOError:
+                        self.source = ""
+                    self.lineno = new_trcback[-1][1]
+        return new_trcback
+
+
+def text_error_template(lookup=None):
+    """Provides a template that renders a stack trace in a similar format to
+    the Python interpreter, substituting source template filenames, line
+    numbers and code for that of the originating source template, as
+    applicable.
+
+    """
+    import mako.template
+
+    return mako.template.Template(
+        r"""
+<%page args="error=None, traceback=None"/>
+<%!
+    from mako.exceptions import RichTraceback
+%>\
+<%
+    tback = RichTraceback(error=error, traceback=traceback)
+%>\
+Traceback (most recent call last):
+% for (filename, lineno, function, line) in tback.traceback:
+  File "${filename}", line ${lineno}, in ${function or '?'}
+    ${line | trim}
+% endfor
+${tback.errorname}: ${tback.message}
+"""
+    )
+
+
+def _install_pygments():
+    global syntax_highlight, pygments_html_formatter
+    from mako.ext.pygmentplugin import syntax_highlight  # noqa
+    from mako.ext.pygmentplugin import pygments_html_formatter  # noqa
+
+
+def _install_fallback():
+    global syntax_highlight, pygments_html_formatter
+    from mako.filters import html_escape
+
+    pygments_html_formatter = None
+
+    def syntax_highlight(filename="", language=None):
+        return html_escape
+
+
+def _install_highlighting():
+    try:
+        _install_pygments()
+    except ImportError:
+        _install_fallback()
+
+
+_install_highlighting()
+
+
+def html_error_template():
+    """Provides a template that renders a stack trace in an HTML format,
+    providing an excerpt of code as well as substituting source template
+    filenames, line numbers and code for that of the originating source
+    template, as applicable.
+
+    The template's default ``encoding_errors`` value is
+    ``'htmlentityreplace'``. The template has two options. With the
+    ``full`` option disabled, only a section of an HTML document is
+    returned. With the ``css`` option disabled, the default stylesheet
+    won't be included.
+
+    """
+    import mako.template
+
+    return mako.template.Template(
+        r"""
+<%!
+    from mako.exceptions import RichTraceback, syntax_highlight,\
+            pygments_html_formatter
+%>
+<%page args="full=True, css=True, error=None, traceback=None"/>
+% if full:
+<html>
+<head>
+    <title>Mako Runtime Error</title>
+% endif
+% if css:
+    <style>
+        body { font-family:verdana; margin:10px 30px 10px 30px;}
+        .stacktrace { margin:5px 5px 5px 5px; }
+        .highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
+        .nonhighlight { padding:0px; background-color:#DFDFDF; }
+        .sample { padding:10px; margin:10px 10px 10px 10px;
+                  font-family:monospace; }
+        .sampleline { padding:0px 10px 0px 10px; }
+        .sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
+        .location { font-size:80%; }
+        .highlight { white-space:pre; }
+        .sampleline { white-space:pre; }
+
+    % if pygments_html_formatter:
+        ${pygments_html_formatter.get_style_defs()}
+        .linenos { min-width: 2.5em; text-align: right; }
+        pre { margin: 0; }
+        .syntax-highlighted { padding: 0 10px; }
+        .syntax-highlightedtable { border-spacing: 1px; }
+        .nonhighlight { border-top: 1px solid #DFDFDF;
+                        border-bottom: 1px solid #DFDFDF; }
+        .stacktrace .nonhighlight { margin: 5px 15px 10px; }
+        .sourceline { margin: 0 0; font-family:monospace; }
+        .code { background-color: #F8F8F8; width: 100%; }
+        .error .code { background-color: #FFBDBD; }
+        .error .syntax-highlighted { background-color: #FFBDBD; }
+    % endif
+
+    </style>
+% endif
+% if full:
+</head>
+<body>
+% endif
+
+<h2>Error!</h2>
+<%
+    tback = RichTraceback(error=error, traceback=traceback)
+    src = tback.source
+    line = tback.lineno
+    if src:
+        lines = src.split('\n')
+    else:
+        lines = None
+%>
+<h3>${tback.errorname}: ${tback.message|h}</h3>
+
+% if lines:
+    <div class="sample">
+    <div class="nonhighlight">
+% for index in range(max(0, line-4),min(len(lines), line+5)):
+    <%
+       if pygments_html_formatter:
+           pygments_html_formatter.linenostart = index + 1
+    %>
+    % if index + 1 == line:
+    <%
+       if pygments_html_formatter:
+           old_cssclass = pygments_html_formatter.cssclass
+           pygments_html_formatter.cssclass = 'error ' + old_cssclass
+    %>
+        ${lines[index] | syntax_highlight(language='mako')}
+    <%
+       if pygments_html_formatter:
+           pygments_html_formatter.cssclass = old_cssclass
+    %>
+    % else:
+        ${lines[index] | syntax_highlight(language='mako')}
+    % endif
+% endfor
+    </div>
+    </div>
+% endif
+
+<div class="stacktrace">
+% for (filename, lineno, function, line) in tback.reverse_traceback:
+    <div class="location">${filename}, line ${lineno}:</div>
+    <div class="nonhighlight">
+    <%
+       if pygments_html_formatter:
+           pygments_html_formatter.linenostart = lineno
+    %>
+      <div class="sourceline">${line | syntax_highlight(filename)}</div>
+    </div>
+% endfor
+</div>
+
+% if full:
+</body>
+</html>
+% endif
+""",
+        output_encoding=sys.getdefaultencoding(),
+        encoding_errors="htmlentityreplace",
+    )
diff --git a/venv/Lib/site-packages/mako/ext/__init__.py b/venv/Lib/site-packages/mako/ext/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2b23a4ee947d7bd8ac6d1fb7abb22e558b77c3b
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/autohandler.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/autohandler.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..698c78f4075fcf0ef61478d38a6ec2ada7ca23fd
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/autohandler.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/babelplugin.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/babelplugin.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40710cb74b1c87efb36e3e499683d2ff268215d9
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/babelplugin.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/beaker_cache.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/beaker_cache.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..644d2c8618414dc019ebe3830e101c49a64d8246
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/beaker_cache.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/extract.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/extract.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c90abf6dda198f62bb6ea75cdf63c81496a1332b
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/extract.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/linguaplugin.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/linguaplugin.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f72a6f2ed15a4447dd517521d9b5cb26507f7ff6
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/linguaplugin.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/preprocessors.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/preprocessors.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71aceaa57a8b762393fe36b110d35ea590dee871
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/preprocessors.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/pygmentplugin.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/pygmentplugin.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7912a4148fecbf3309ca42331249418ecbd2aed9
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/pygmentplugin.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/__pycache__/turbogears.cpython-311.pyc b/venv/Lib/site-packages/mako/ext/__pycache__/turbogears.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62be19a24461c3eeae268efb27657f8f38a38971
Binary files /dev/null and b/venv/Lib/site-packages/mako/ext/__pycache__/turbogears.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/ext/autohandler.py b/venv/Lib/site-packages/mako/ext/autohandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c33f080f2a598ebfec3e6bb3b225069d7fbde044
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ext/autohandler.py
@@ -0,0 +1,70 @@
+# ext/autohandler.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""adds autohandler functionality to Mako templates.
+
+requires that the TemplateLookup class be used with templates.
+
+usage::
+
+    <%!
+        from mako.ext.autohandler import autohandler
+    %>
+    <%inherit file="${autohandler(template, context)}"/>
+
+
+or with custom autohandler filename::
+
+    <%!
+        from mako.ext.autohandler import autohandler
+    %>
+    <%inherit file="${autohandler(template, context, name='somefilename')}"/>
+
+"""
+
+import os
+import posixpath
+import re
+
+
+def autohandler(template, context, name="autohandler"):
+    lookup = context.lookup
+    _template_uri = template.module._template_uri
+    if not lookup.filesystem_checks:
+        try:
+            return lookup._uri_cache[(autohandler, _template_uri, name)]
+        except KeyError:
+            pass
+
+    tokens = re.findall(r"([^/]+)", posixpath.dirname(_template_uri)) + [name]
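+    # Illustrative walk-through (not part of upstream Mako): for a template
+    # uri of "/a/b/c.html", the loop below probes "/a/b/autohandler",
+    # "/a/autohandler" and finally "/autohandler", returning the first
+    # candidate that exists in one of the lookup's directories.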
+    while len(tokens):
+        path = "/" + "/".join(tokens)
+        if path != _template_uri and _file_exists(lookup, path):
+            if not lookup.filesystem_checks:
+                return lookup._uri_cache.setdefault(
+                    (autohandler, _template_uri, name), path
+                )
+            else:
+                return path
+        if len(tokens) == 1:
+            break
+        tokens[-2:] = [name]
+
+    if not lookup.filesystem_checks:
+        return lookup._uri_cache.setdefault(
+            (autohandler, _template_uri, name), None
+        )
+    else:
+        return None
+
+
+def _file_exists(lookup, path):
+    psub = re.sub(r"^/", "", path)
+    for d in lookup.directories:
+        if os.path.exists(d + "/" + psub):
+            return True
+    else:
+        return False
diff --git a/venv/Lib/site-packages/mako/ext/babelplugin.py b/venv/Lib/site-packages/mako/ext/babelplugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..5126d6f4d8177d4c81c55869d5ea5432dae912e1
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ext/babelplugin.py
@@ -0,0 +1,57 @@
+# ext/babelplugin.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""gettext message extraction via Babel: https://pypi.org/project/Babel/"""
+from babel.messages.extract import extract_python
+
+from mako.ext.extract import MessageExtractor
+
+
+class BabelMakoExtractor(MessageExtractor):
+    def __init__(self, keywords, comment_tags, options):
+        self.keywords = keywords
+        self.options = options
+        self.config = {
+            "comment-tags": " ".join(comment_tags),
+            "encoding": options.get(
+                "input_encoding", options.get("encoding", None)
+            ),
+        }
+        super().__init__()
+
+    def __call__(self, fileobj):
+        return self.process_file(fileobj)
+
+    def process_python(self, code, code_lineno, translator_strings):
+        comment_tags = self.config["comment-tags"]
+        for (
+            lineno,
+            funcname,
+            messages,
+            python_translator_comments,
+        ) in extract_python(code, self.keywords, comment_tags, self.options):
+            yield (
+                code_lineno + (lineno - 1),
+                funcname,
+                messages,
+                translator_strings + python_translator_comments,
+            )
+
+
+def extract(fileobj, keywords, comment_tags, options):
+    """Extract messages from Mako templates.
+
+    :param fileobj: the file-like object the messages should be extracted from
+    :param keywords: a list of keywords (i.e. function names) that should be
+                     recognized as translation functions
+    :param comment_tags: a list of translator tags to search for and include
+                         in the results
+    :param options: a dictionary of additional options (optional)
+    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
+    :rtype: ``iterator``
+    """
+    extractor = BabelMakoExtractor(keywords, comment_tags, options)
+    yield from extractor(fileobj)
diff --git a/venv/Lib/site-packages/mako/ext/beaker_cache.py b/venv/Lib/site-packages/mako/ext/beaker_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f1f9d4f8e2b57ee4a3382fe881c1c32899edd7b
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ext/beaker_cache.py
@@ -0,0 +1,82 @@
+# ext/beaker_cache.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Provide a :class:`.CacheImpl` for the Beaker caching system."""
+
+from mako import exceptions
+from mako.cache import CacheImpl
+
+try:
+    from beaker import cache as beaker_cache
+except ImportError:
+    has_beaker = False
+else:
+    has_beaker = True
+
+_beaker_cache = None
+
+
+class BeakerCacheImpl(CacheImpl):
+
+    """A :class:`.CacheImpl` provided for the Beaker caching system.
+
+    This plugin is used by default, based on the default
+    value of ``'beaker'`` for the ``cache_impl`` parameter of the
+    :class:`.Template` or :class:`.TemplateLookup` classes.
+
+    """
+
+    def __init__(self, cache):
+        if not has_beaker:
+            raise exceptions.RuntimeException(
+                "Can't initialize Beaker plugin; Beaker is not installed."
+            )
+        global _beaker_cache
+        if _beaker_cache is None:
+            if "manager" in cache.template.cache_args:
+                _beaker_cache = cache.template.cache_args["manager"]
+            else:
+                _beaker_cache = beaker_cache.CacheManager()
+        super().__init__(cache)
+
+    def _get_cache(self, **kw):
+        expiretime = kw.pop("timeout", None)
+        if "dir" in kw:
+            kw["data_dir"] = kw.pop("dir")
+        elif self.cache.template.module_directory:
+            kw["data_dir"] = self.cache.template.module_directory
+
+        if "manager" in kw:
+            kw.pop("manager")
+
+        if kw.get("type") == "memcached":
+            kw["type"] = "ext:memcached"
+
+        if "region" in kw:
+            region = kw.pop("region")
+            cache = _beaker_cache.get_cache_region(self.cache.id, region, **kw)
+        else:
+            cache = _beaker_cache.get_cache(self.cache.id, **kw)
+        cache_args = {"starttime": self.cache.starttime}
+        if expiretime:
+            cache_args["expiretime"] = expiretime
+        return cache, cache_args
+
+    def get_or_create(self, key, creation_function, **kw):
+        cache, kw = self._get_cache(**kw)
+        return cache.get(key, createfunc=creation_function, **kw)
+
+    def put(self, key, value, **kw):
+        cache, kw = self._get_cache(**kw)
+        cache.put(key, value, **kw)
+
+    def get(self, key, **kw):
+        cache, kw = self._get_cache(**kw)
+        return cache.get(key, **kw)
+
+    def invalidate(self, key, **kw):
+        cache, kw = self._get_cache(**kw)
+        cache.remove_value(key, **kw)
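
A usage sketch for the plugin above, assuming the optional Beaker package is
installed (the cache type and timeout are illustrative):

    import time

    from mako.template import Template

    # cache_impl defaults to "beaker"; cache_args selects a simple
    # in-memory Beaker backend for this sketch.
    t = Template(
        "<%def name='slow()' cached='True' cache_timeout='30'>"
        "${now()}</%def>${slow()}",
        cache_args={"type": "memory"},
    )

    print(t.render(now=time.time))
    print(t.render(now=time.time))  # same value: served from the cache
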
diff --git a/venv/Lib/site-packages/mako/ext/extract.py b/venv/Lib/site-packages/mako/ext/extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa7fffa870f9edae113c61b6cc246aaf1e29e8ff
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ext/extract.py
@@ -0,0 +1,129 @@
+# ext/extract.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from io import BytesIO
+from io import StringIO
+import re
+
+from mako import lexer
+from mako import parsetree
+
+
+class MessageExtractor:
+    use_bytes = True
+
+    def process_file(self, fileobj):
+        template_node = lexer.Lexer(
+            fileobj.read(), input_encoding=self.config["encoding"]
+        ).parse()
+        yield from self.extract_nodes(template_node.get_children())
+
+    def extract_nodes(self, nodes):
+        translator_comments = []
+        in_translator_comments = False
+        input_encoding = self.config["encoding"] or "ascii"
+        comment_tags = list(
+            filter(None, re.split(r"\s+", self.config["comment-tags"]))
+        )
+
+        for node in nodes:
+            child_nodes = None
+            if (
+                in_translator_comments
+                and isinstance(node, parsetree.Text)
+                and not node.content.strip()
+            ):
+                # Ignore whitespace within translator comments
+                continue
+
+            if isinstance(node, parsetree.Comment):
+                value = node.text.strip()
+                if in_translator_comments:
+                    translator_comments.extend(
+                        self._split_comment(node.lineno, value)
+                    )
+                    continue
+                for comment_tag in comment_tags:
+                    if value.startswith(comment_tag):
+                        in_translator_comments = True
+                        translator_comments.extend(
+                            self._split_comment(node.lineno, value)
+                        )
+                continue
+
+            if isinstance(node, parsetree.DefTag):
+                code = node.function_decl.code
+                child_nodes = node.nodes
+            elif isinstance(node, parsetree.BlockTag):
+                code = node.body_decl.code
+                child_nodes = node.nodes
+            elif isinstance(node, parsetree.CallTag):
+                code = node.code.code
+                child_nodes = node.nodes
+            elif isinstance(node, parsetree.PageTag):
+                code = node.body_decl.code
+            elif isinstance(node, parsetree.CallNamespaceTag):
+                code = node.expression
+                child_nodes = node.nodes
+            elif isinstance(node, parsetree.ControlLine):
+                if node.isend:
+                    in_translator_comments = False
+                    continue
+                code = node.text
+            elif isinstance(node, parsetree.Code):
+                in_translator_comments = False
+                code = node.code.code
+            elif isinstance(node, parsetree.Expression):
+                code = node.code.code
+            else:
+                continue
+
+            # Comments don't apply unless they immediately precede the message
+            if (
+                translator_comments
+                and translator_comments[-1][0] < node.lineno - 1
+            ):
+                translator_comments = []
+
+            translator_strings = [
+                comment[1] for comment in translator_comments
+            ]
+
+            if isinstance(code, str) and self.use_bytes:
+                code = code.encode(input_encoding, "backslashreplace")
+
+            used_translator_comments = False
+            # We add an extra newline to work around a pybabel bug
+            # (see python-babel/babel#274; parse_encoding dies if the
+            # first line of the input is non-ascii).
+            # Also, because we added it, we have to subtract one from
+            # node.lineno.
+            if self.use_bytes:
+                code = BytesIO(b"\n" + code)
+            else:
+                code = StringIO("\n" + code)
+
+            for message in self.process_python(
+                code, node.lineno - 1, translator_strings
+            ):
+                yield message
+                used_translator_comments = True
+
+            if used_translator_comments:
+                translator_comments = []
+            in_translator_comments = False
+
+            if child_nodes:
+                yield from self.extract_nodes(child_nodes)
+
+    @staticmethod
+    def _split_comment(lineno, comment):
+        """Return the multiline comment at lineno split into a list of
+        comment line numbers and the accompanying comment line"""
+        return [
+            (lineno + index, line)
+            for index, line in enumerate(comment.splitlines())
+        ]
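
The _split_comment helper at the end of the class is easiest to see with a
concrete call (the line number is arbitrary):

    from mako.ext.extract import MessageExtractor

    # A translator comment spanning two physical lines starting at
    # line 10 becomes one (lineno, text) pair per line.
    print(MessageExtractor._split_comment(10, "first line\nsecond line"))
    # -> [(10, 'first line'), (11, 'second line')]
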
diff --git a/venv/Lib/site-packages/mako/ext/linguaplugin.py b/venv/Lib/site-packages/mako/ext/linguaplugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..8058b3675b93230596d6bd2b48457b464dcc63f0
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ext/linguaplugin.py
@@ -0,0 +1,57 @@
+# ext/linguaplugin.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import contextlib
+import io
+
+from lingua.extractors import Extractor
+from lingua.extractors import get_extractor
+from lingua.extractors import Message
+
+from mako.ext.extract import MessageExtractor
+
+
+class LinguaMakoExtractor(Extractor, MessageExtractor):
+    """Mako templates"""
+
+    use_bytes = False
+    extensions = [".mako"]
+    default_config = {"encoding": "utf-8", "comment-tags": ""}
+
+    def __call__(self, filename, options, fileobj=None):
+        self.options = options
+        self.filename = filename
+        self.python_extractor = get_extractor("x.py")
+        if fileobj is None:
+            ctx = open(filename, "r")
+        else:
+            ctx = contextlib.nullcontext(fileobj)
+        with ctx as file_:
+            yield from self.process_file(file_)
+
+    def process_python(self, code, code_lineno, translator_strings):
+        source = code.getvalue().strip()
+        if source.endswith(":"):
+            if source in ("try:", "else:") or source.startswith("except"):
+                source = ""  # Ignore try/except and else
+            elif source.startswith("elif"):
+                source = source[2:]  # Replace "elif" with "if"
+            source += "pass"
+        code = io.StringIO(source)
+        for msg in self.python_extractor(
+            self.filename, self.options, code, code_lineno - 1
+        ):
+            if translator_strings:
+                msg = Message(
+                    msg.msgctxt,
+                    msg.msgid,
+                    msg.msgid_plural,
+                    msg.flags,
+                    " ".join(translator_strings + [msg.comment]),
+                    msg.tcomment,
+                    msg.location,
+                )
+            yield msg
diff --git a/venv/Lib/site-packages/mako/ext/preprocessors.py b/venv/Lib/site-packages/mako/ext/preprocessors.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2856853d6cb93bbfe16bb9f22be3047ccd2594b
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ext/preprocessors.py
@@ -0,0 +1,20 @@
+# ext/preprocessors.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""preprocessing functions, used with the 'preprocessor'
+argument on Template, TemplateLookup"""
+
+import re
+
+
+def convert_comments(text):
+    """preprocess old style comments.
+
+    example:
+
+    from mako.ext.preprocessors import convert_comments
+    t = Template(..., preprocessor=convert_comments)"""
+    return re.sub(r"(?<=\n)\s*#[^#]", "##", text)
diff --git a/venv/Lib/site-packages/mako/ext/pygmentplugin.py b/venv/Lib/site-packages/mako/ext/pygmentplugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..7763bc8681e864129f648f68b14680435e9d6a86
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ext/pygmentplugin.py
@@ -0,0 +1,150 @@
+# ext/pygmentplugin.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from pygments import highlight
+from pygments.formatters.html import HtmlFormatter
+from pygments.lexer import bygroups
+from pygments.lexer import DelegatingLexer
+from pygments.lexer import include
+from pygments.lexer import RegexLexer
+from pygments.lexer import using
+from pygments.lexers.agile import Python3Lexer
+from pygments.lexers.agile import PythonLexer
+from pygments.lexers.web import CssLexer
+from pygments.lexers.web import HtmlLexer
+from pygments.lexers.web import JavascriptLexer
+from pygments.lexers.web import XmlLexer
+from pygments.token import Comment
+from pygments.token import Keyword
+from pygments.token import Name
+from pygments.token import Operator
+from pygments.token import Other
+from pygments.token import String
+from pygments.token import Text
+
+
+class MakoLexer(RegexLexer):
+    name = "Mako"
+    aliases = ["mako"]
+    filenames = ["*.mao"]
+
+    tokens = {
+        "root": [
+            (
+                r"(\s*)(\%)(\s*end(?:\w+))(\n|\Z)",
+                bygroups(Text, Comment.Preproc, Keyword, Other),
+            ),
+            (
+                r"(\s*)(\%(?!%))([^\n]*)(\n|\Z)",
+                bygroups(Text, Comment.Preproc, using(PythonLexer), Other),
+            ),
+            (
+                r"(\s*)(##[^\n]*)(\n|\Z)",
+                bygroups(Text, Comment.Preproc, Other),
+            ),
+            (r"""(?s)<%doc>.*?</%doc>""", Comment.Preproc),
+            (
+                r"(<%)([\w\.\:]+)",
+                bygroups(Comment.Preproc, Name.Builtin),
+                "tag",
+            ),
+            (
+                r"(</%)([\w\.\:]+)(>)",
+                bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc),
+            ),
+            (r"<%(?=([\w\.\:]+))", Comment.Preproc, "ondeftags"),
+            (
+                r"(?s)(<%(?:!?))(.*?)(%>)",
+                bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc),
+            ),
+            (
+                r"(\$\{)(.*?)(\})",
+                bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc),
+            ),
+            (
+                r"""(?sx)
+                (.+?)               # anything, followed by:
+                (?:
+                 (?<=\n)(?=%(?!%)|\#\#) |  # an eval or comment line
+                 (?=\#\*) |          # multiline comment
+                 (?=</?%) |         # a python block
+                                    # call start or end
+                 (?=\$\{) |         # a substitution
+                 (?<=\n)(?=\s*%) |
+                                    # - don't consume
+                 (\\\n) |           # an escaped newline
+                 \Z                 # end of string
+                )
+            """,
+                bygroups(Other, Operator),
+            ),
+            (r"\s+", Text),
+        ],
+        "ondeftags": [
+            (r"<%", Comment.Preproc),
+            (r"(?<=<%)(include|inherit|namespace|page)", Name.Builtin),
+            include("tag"),
+        ],
+        "tag": [
+            (r'((?:\w+)\s*=)\s*(".*?")', bygroups(Name.Attribute, String)),
+            (r"/?\s*>", Comment.Preproc, "#pop"),
+            (r"\s+", Text),
+        ],
+        "attr": [
+            ('".*?"', String, "#pop"),
+            ("'.*?'", String, "#pop"),
+            (r"[^\s>]+", String, "#pop"),
+        ],
+    }
+
+
+class MakoHtmlLexer(DelegatingLexer):
+    name = "HTML+Mako"
+    aliases = ["html+mako"]
+
+    def __init__(self, **options):
+        super().__init__(HtmlLexer, MakoLexer, **options)
+
+
+class MakoXmlLexer(DelegatingLexer):
+    name = "XML+Mako"
+    aliases = ["xml+mako"]
+
+    def __init__(self, **options):
+        super().__init__(XmlLexer, MakoLexer, **options)
+
+
+class MakoJavascriptLexer(DelegatingLexer):
+    name = "JavaScript+Mako"
+    aliases = ["js+mako", "javascript+mako"]
+
+    def __init__(self, **options):
+        super().__init__(JavascriptLexer, MakoLexer, **options)
+
+
+class MakoCssLexer(DelegatingLexer):
+    name = "CSS+Mako"
+    aliases = ["css+mako"]
+
+    def __init__(self, **options):
+        super().__init__(CssLexer, MakoLexer, **options)
+
+
+pygments_html_formatter = HtmlFormatter(
+    cssclass="syntax-highlighted", linenos=True
+)
+
+
+def syntax_highlight(filename="", language=None):
+    mako_lexer = MakoLexer()
+    python_lexer = Python3Lexer()
+    if filename.startswith("memory:") or language == "mako":
+        return lambda string: highlight(
+            string, mako_lexer, pygments_html_formatter
+        )
+    return lambda string: highlight(
+        string, python_lexer, pygments_html_formatter
+    )
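
A minimal sketch of highlighting a Mako snippet with the lexer and the shared
formatter defined above (the template source is illustrative):

    from pygments import highlight

    from mako.ext.pygmentplugin import MakoLexer, pygments_html_formatter

    source = "% for name in names:\n  Hello, ${name}!\n% endfor\n"
    # Emits pygments HTML output with line numbers, styled via the
    # "syntax-highlighted" CSS class configured on the formatter.
    print(highlight(source, MakoLexer(), pygments_html_formatter))
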
diff --git a/venv/Lib/site-packages/mako/ext/turbogears.py b/venv/Lib/site-packages/mako/ext/turbogears.py
new file mode 100644
index 0000000000000000000000000000000000000000..28f2696bc1ab89372cd2e9c04630b2b6d3c9332f
--- /dev/null
+++ b/venv/Lib/site-packages/mako/ext/turbogears.py
@@ -0,0 +1,61 @@
+# ext/turbogears.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from mako import compat
+from mako.lookup import TemplateLookup
+from mako.template import Template
+
+
+class TGPlugin:
+
+    """TurboGears compatible Template Plugin."""
+
+    def __init__(self, extra_vars_func=None, options=None, extension="mak"):
+        self.extra_vars_func = extra_vars_func
+        self.extension = extension
+        if not options:
+            options = {}
+
+        # Pull the options out and initialize the lookup
+        lookup_options = {}
+        for k, v in options.items():
+            if k.startswith("mako."):
+                lookup_options[k[5:]] = v
+            elif k in ["directories", "filesystem_checks", "module_directory"]:
+                lookup_options[k] = v
+        self.lookup = TemplateLookup(**lookup_options)
+
+        self.tmpl_options = {}
+        # transfer lookup args to template args, based on those available
+        # in getargspec
+        for kw in compat.inspect_getargspec(Template.__init__)[0]:
+            if kw in lookup_options:
+                self.tmpl_options[kw] = lookup_options[kw]
+
+    def load_template(self, templatename, template_string=None):
+        """Loads a template from a file or a string"""
+        if template_string is not None:
+            return Template(template_string, **self.tmpl_options)
+        # Translate TG dot notation to normal / template path
+        if "/" not in templatename:
+            templatename = (
+                "/" + templatename.replace(".", "/") + "." + self.extension
+            )
+
+        # Lookup template
+        return self.lookup.get_template(templatename)
+
+    def render(
+        self, info, format="html", fragment=False, template=None  # noqa
+    ):
+        if isinstance(template, str):
+            template = self.load_template(template)
+
+        # Load extra vars func if provided
+        if self.extra_vars_func:
+            info.update(self.extra_vars_func())
+
+        return template.render(**info)
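
A hypothetical usage sketch for TGPlugin (the option key and template text
are illustrative):

    from mako.ext.turbogears import TGPlugin

    plugin = TGPlugin(options={"mako.input_encoding": "utf-8"})

    # template_string bypasses the lookup; a dotted name such as
    # "site.index" would otherwise be translated to "/site/index.mak".
    tmpl = plugin.load_template("greeting", template_string="Hi, ${name}!")
    print(plugin.render({"name": "world"}, template=tmpl))
    # -> Hi, world!
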
diff --git a/venv/Lib/site-packages/mako/filters.py b/venv/Lib/site-packages/mako/filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..b255aaf8d59a900f7099058c9731b057946a8ad9
--- /dev/null
+++ b/venv/Lib/site-packages/mako/filters.py
@@ -0,0 +1,163 @@
+# mako/filters.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+
+import codecs
+from html.entities import codepoint2name
+from html.entities import name2codepoint
+import re
+from urllib.parse import quote_plus
+
+import markupsafe
+
+html_escape = markupsafe.escape
+
+xml_escapes = {
+    "&": "&amp;",
+    ">": "&gt;",
+    "<": "&lt;",
+    '"': "&#34;",  # also &quot; in html-only
+    "'": "&#39;",  # also &apos; in html-only
+}
+
+
+def xml_escape(string):
+    return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
+
+
+def url_escape(string):
+    # convert into a list of octets
+    string = string.encode("utf8")
+    return quote_plus(string)
+
+
+def trim(string):
+    return string.strip()
+
+
+class Decode:
+    def __getattr__(self, key):
+        def decode(x):
+            if isinstance(x, str):
+                return x
+            elif not isinstance(x, bytes):
+                return decode(str(x))
+            else:
+                return str(x, encoding=key)
+
+        return decode
+
+
+decode = Decode()
+
+
+class XMLEntityEscaper:
+    def __init__(self, codepoint2name, name2codepoint):
+        self.codepoint2entity = {
+            c: str("&%s;" % n) for c, n in codepoint2name.items()
+        }
+        self.name2codepoint = name2codepoint
+
+    def escape_entities(self, text):
+        """Replace characters with their character entity references.
+
+        Only characters corresponding to a named entity are replaced.
+        """
+        return str(text).translate(self.codepoint2entity)
+
+    def __escape(self, m):
+        codepoint = ord(m.group())
+        try:
+            return self.codepoint2entity[codepoint]
+        except (KeyError, IndexError):
+            return "&#x%X;" % codepoint
+
+    __escapable = re.compile(r'["&<>]|[^\x00-\x7f]')
+
+    def escape(self, text):
+        """Replace characters with their character references.
+
+        Replace characters by their named entity references.
+        Non-ASCII characters, if they do not have a named entity reference,
+        are replaced by numerical character references.
+
+        The return value is guaranteed to be ASCII.
+        """
+        return self.__escapable.sub(self.__escape, str(text)).encode("ascii")
+
+    # XXX: This regexp will not match all valid XML entity names__.
+    # (It punts on details involving CombiningChars and Extenders.)
+    #
+    # .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
+    __characterrefs = re.compile(
+        r"""& (?:
+                                          \#(\d+)
+                                          | \#x([\da-f]+)
+                                          | ( (?!\d) [:\w] [-.:\w]+ )
+                                          ) ;""",
+        re.X | re.UNICODE,
+    )
+
+    def __unescape(self, m):
+        dval, hval, name = m.groups()
+        if dval:
+            codepoint = int(dval)
+        elif hval:
+            codepoint = int(hval, 16)
+        else:
+            codepoint = self.name2codepoint.get(name, 0xFFFD)
+            # U+FFFD = "REPLACEMENT CHARACTER"
+        return chr(codepoint)
+
+    def unescape(self, text):
+        """Unescape character references.
+
+        All character references (both entity references and numerical
+        character references) are unescaped.
+        """
+        return self.__characterrefs.sub(self.__unescape, text)
+
+
+_html_entities_escaper = XMLEntityEscaper(codepoint2name, name2codepoint)
+
+html_entities_escape = _html_entities_escaper.escape_entities
+html_entities_unescape = _html_entities_escaper.unescape
+
+
+def htmlentityreplace_errors(ex):
+    """An encoding error handler.
+
+    This python codecs error handler replaces unencodable
+    characters with HTML entities, or, if no HTML entity exists for
+    the character, XML character references::
+
+        >>> 'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
+        b'The cost was &euro;12.'
+    """
+    if isinstance(ex, UnicodeEncodeError):
+        # Handle encoding errors
+        bad_text = ex.object[ex.start : ex.end]
+        text = _html_entities_escaper.escape(bad_text)
+        return (str(text, "ascii"), ex.end)
+    raise ex
+
+
+codecs.register_error("htmlentityreplace", htmlentityreplace_errors)
+
+
+DEFAULT_ESCAPES = {
+    "x": "filters.xml_escape",
+    "h": "filters.html_escape",
+    "u": "filters.url_escape",
+    "trim": "filters.trim",
+    "entity": "filters.html_entities_escape",
+    "unicode": "str",
+    "decode": "decode",
+    "str": "str",
+    "n": "n",
+}
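
Two of the filters above, exercised directly (the example strings are
illustrative):

    from mako import filters

    print(filters.xml_escape('<a href="x">'))
    # -> &lt;a href=&#34;x&#34;&gt;

    # Uses the 'htmlentityreplace' error handler registered above.
    print("The cost was \u20ac12.".encode("latin-1", "htmlentityreplace"))
    # -> b'The cost was &euro;12.'
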
diff --git a/venv/Lib/site-packages/mako/lexer.py b/venv/Lib/site-packages/mako/lexer.py
new file mode 100644
index 0000000000000000000000000000000000000000..34f17dc9930a136606dc4f82d3a926ac982f3a22
--- /dev/null
+++ b/venv/Lib/site-packages/mako/lexer.py
@@ -0,0 +1,469 @@
+# mako/lexer.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""provides the Lexer class for parsing template strings into parse trees."""
+
+import codecs
+import re
+
+from mako import exceptions
+from mako import parsetree
+from mako.pygen import adjust_whitespace
+
+_regexp_cache = {}
+
+
+class Lexer:
+    def __init__(
+        self, text, filename=None, input_encoding=None, preprocessor=None
+    ):
+        self.text = text
+        self.filename = filename
+        self.template = parsetree.TemplateNode(self.filename)
+        self.matched_lineno = 1
+        self.matched_charpos = 0
+        self.lineno = 1
+        self.match_position = 0
+        self.tag = []
+        self.control_line = []
+        self.ternary_stack = []
+        self.encoding = input_encoding
+
+        if preprocessor is None:
+            self.preprocessor = []
+        elif not hasattr(preprocessor, "__iter__"):
+            self.preprocessor = [preprocessor]
+        else:
+            self.preprocessor = preprocessor
+
+    @property
+    def exception_kwargs(self):
+        return {
+            "source": self.text,
+            "lineno": self.matched_lineno,
+            "pos": self.matched_charpos,
+            "filename": self.filename,
+        }
+
+    def match(self, regexp, flags=None):
+        """compile the given regexp, cache the reg, and call match_reg()."""
+
+        try:
+            reg = _regexp_cache[(regexp, flags)]
+        except KeyError:
+            reg = re.compile(regexp, flags) if flags else re.compile(regexp)
+            _regexp_cache[(regexp, flags)] = reg
+
+        return self.match_reg(reg)
+
+    def match_reg(self, reg):
+        """match the given regular expression object to the current text
+        position.
+
+        if a match occurs, update the current text and line position.
+
+        """
+
+        mp = self.match_position
+
+        match = reg.match(self.text, self.match_position)
+        if match:
+            (start, end) = match.span()
+            self.match_position = end + 1 if end == start else end
+            self.matched_lineno = self.lineno
+            cp = mp - 1
+            if cp >= 0 and cp < self.textlength:
+                cp = self.text[: cp + 1].rfind("\n")
+            self.matched_charpos = mp - cp
+            self.lineno += self.text[mp : self.match_position].count("\n")
+        return match
+
+    def parse_until_text(self, watch_nesting, *text):
+        startpos = self.match_position
+        text_re = r"|".join(text)
+        brace_level = 0
+        paren_level = 0
+        bracket_level = 0
+        while True:
+            match = self.match(r"#.*\n")
+            if match:
+                continue
+            match = self.match(
+                r"(\"\"\"|\'\'\'|\"|\')[^\\]*?(\\.[^\\]*?)*\1", re.S
+            )
+            if match:
+                continue
+            match = self.match(r"(%s)" % text_re)
+            if match and not (
+                watch_nesting
+                and (brace_level > 0 or paren_level > 0 or bracket_level > 0)
+            ):
+                return (
+                    self.text[
+                        startpos : self.match_position - len(match.group(1))
+                    ],
+                    match.group(1),
+                )
+            elif not match:
+                match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S)
+            if match:
+                brace_level += match.group(1).count("{")
+                brace_level -= match.group(1).count("}")
+                paren_level += match.group(1).count("(")
+                paren_level -= match.group(1).count(")")
+                bracket_level += match.group(1).count("[")
+                bracket_level -= match.group(1).count("]")
+                continue
+            raise exceptions.SyntaxException(
+                "Expected: %s" % ",".join(text), **self.exception_kwargs
+            )
+
+    def append_node(self, nodecls, *args, **kwargs):
+        kwargs.setdefault("source", self.text)
+        kwargs.setdefault("lineno", self.matched_lineno)
+        kwargs.setdefault("pos", self.matched_charpos)
+        kwargs["filename"] = self.filename
+        node = nodecls(*args, **kwargs)
+        if len(self.tag):
+            self.tag[-1].nodes.append(node)
+        else:
+            self.template.nodes.append(node)
+        # build a set of child nodes for the control line
+        # (used for loop variable detection)
+        # also build a set of child nodes on ternary control lines
+        # (used for determining if a pass needs to be auto-inserted)
+        if self.control_line:
+            control_frame = self.control_line[-1]
+            control_frame.nodes.append(node)
+            if (
+                not (
+                    isinstance(node, parsetree.ControlLine)
+                    and control_frame.is_ternary(node.keyword)
+                )
+                and self.ternary_stack
+                and self.ternary_stack[-1]
+            ):
+                self.ternary_stack[-1][-1].nodes.append(node)
+        if isinstance(node, parsetree.Tag):
+            if len(self.tag):
+                node.parent = self.tag[-1]
+            self.tag.append(node)
+        elif isinstance(node, parsetree.ControlLine):
+            if node.isend:
+                self.control_line.pop()
+                self.ternary_stack.pop()
+            elif node.is_primary:
+                self.control_line.append(node)
+                self.ternary_stack.append([])
+            elif self.control_line and self.control_line[-1].is_ternary(
+                node.keyword
+            ):
+                self.ternary_stack[-1].append(node)
+            elif self.control_line and not self.control_line[-1].is_ternary(
+                node.keyword
+            ):
+                raise exceptions.SyntaxException(
+                    "Keyword '%s' not a legal ternary for keyword '%s'"
+                    % (node.keyword, self.control_line[-1].keyword),
+                    **self.exception_kwargs,
+                )
+
+    _coding_re = re.compile(r"#.*coding[:=]\s*([-\w.]+).*\r?\n")
+
+    def decode_raw_stream(self, text, decode_raw, known_encoding, filename):
+        """given string/unicode or bytes/string, determine encoding
+        from magic encoding comment, return body as unicode
+        or raw if decode_raw=False
+
+        """
+        if isinstance(text, str):
+            m = self._coding_re.match(text)
+            encoding = m and m.group(1) or known_encoding or "utf-8"
+            return encoding, text
+
+        if text.startswith(codecs.BOM_UTF8):
+            text = text[len(codecs.BOM_UTF8) :]
+            parsed_encoding = "utf-8"
+            m = self._coding_re.match(text.decode("utf-8", "ignore"))
+            if m is not None and m.group(1) != "utf-8":
+                raise exceptions.CompileException(
+                    "Found utf-8 BOM in file, with conflicting "
+                    "magic encoding comment of '%s'" % m.group(1),
+                    text.decode("utf-8", "ignore"),
+                    0,
+                    0,
+                    filename,
+                )
+        else:
+            m = self._coding_re.match(text.decode("utf-8", "ignore"))
+            parsed_encoding = m.group(1) if m else known_encoding or "utf-8"
+        if decode_raw:
+            try:
+                text = text.decode(parsed_encoding)
+            except UnicodeDecodeError:
+                raise exceptions.CompileException(
+                    "Unicode decode operation of encoding '%s' failed"
+                    % parsed_encoding,
+                    text.decode("utf-8", "ignore"),
+                    0,
+                    0,
+                    filename,
+                )
+
+        return parsed_encoding, text
+
+    def parse(self):
+        self.encoding, self.text = self.decode_raw_stream(
+            self.text, True, self.encoding, self.filename
+        )
+
+        for preproc in self.preprocessor:
+            self.text = preproc(self.text)
+
+        # push the match marker past the
+        # encoding comment.
+        self.match_reg(self._coding_re)
+
+        self.textlength = len(self.text)
+
+        while True:
+            if self.match_position > self.textlength:
+                break
+
+            if self.match_end():
+                break
+            if self.match_expression():
+                continue
+            if self.match_control_line():
+                continue
+            if self.match_comment():
+                continue
+            if self.match_tag_start():
+                continue
+            if self.match_tag_end():
+                continue
+            if self.match_python_block():
+                continue
+            if self.match_text():
+                continue
+
+            if self.match_position > self.textlength:
+                break
+            # TODO: no coverage here
+            raise exceptions.MakoException("assertion failed")
+
+        if len(self.tag):
+            raise exceptions.SyntaxException(
+                "Unclosed tag: <%%%s>" % self.tag[-1].keyword,
+                **self.exception_kwargs,
+            )
+        if len(self.control_line):
+            raise exceptions.SyntaxException(
+                "Unterminated control keyword: '%s'"
+                % self.control_line[-1].keyword,
+                self.text,
+                self.control_line[-1].lineno,
+                self.control_line[-1].pos,
+                self.filename,
+            )
+        return self.template
+
+    def match_tag_start(self):
+        reg = r"""
+            \<%     # opening tag
+
+            ([\w\.\:]+)   # keyword
+
+            ((?:\s+\w+|\s*=\s*|"[^"]*?"|'[^']*?'|\s*,\s*)*)  # attrname, = \
+                                               #        sign, string expression
+                                               # comma is for backwards compat
+                                               # identified in #366
+
+            \s*     # more whitespace
+
+            (/)?>   # closing
+
+        """
+
+        match = self.match(
+            reg,
+            re.I | re.S | re.X,
+        )
+
+        if not match:
+            return False
+
+        keyword, attr, isend = match.groups()
+        self.keyword = keyword
+        attributes = {}
+        if attr:
+            for att in re.findall(
+                r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr
+            ):
+                key, val1, val2 = att
+                text = val1 or val2
+                text = text.replace("\r\n", "\n")
+                attributes[key] = text
+        self.append_node(parsetree.Tag, keyword, attributes)
+        if isend:
+            self.tag.pop()
+        elif keyword == "text":
+            match = self.match(r"(.*?)(?=\</%text>)", re.S)
+            if not match:
+                raise exceptions.SyntaxException(
+                    "Unclosed tag: <%%%s>" % self.tag[-1].keyword,
+                    **self.exception_kwargs,
+                )
+            self.append_node(parsetree.Text, match.group(1))
+            return self.match_tag_end()
+        return True
+
+    def match_tag_end(self):
+        match = self.match(r"\</%[\t ]*([^\t ]+?)[\t ]*>")
+        if match:
+            if not len(self.tag):
+                raise exceptions.SyntaxException(
+                    "Closing tag without opening tag: </%%%s>"
+                    % match.group(1),
+                    **self.exception_kwargs,
+                )
+            elif self.tag[-1].keyword != match.group(1):
+                raise exceptions.SyntaxException(
+                    "Closing tag </%%%s> does not match tag: <%%%s>"
+                    % (match.group(1), self.tag[-1].keyword),
+                    **self.exception_kwargs,
+                )
+            self.tag.pop()
+            return True
+        else:
+            return False
+
+    def match_end(self):
+        match = self.match(r"\Z", re.S)
+        if not match:
+            return False
+
+        string = match.group()
+        if string:
+            return string
+        else:
+            return True
+
+    def match_text(self):
+        match = self.match(
+            r"""
+                (.*?)         # anything, followed by:
+                (
+                 (?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
+                                             # comment preceded by a
+                                             # consumed newline and whitespace
+                 |
+                 (?=\${)      # an expression
+                 |
+                 (?=</?[%&])  # a substitution or block or call start or end
+                              # - don't consume
+                 |
+                 (\\\r?\n)    # an escaped newline  - throw away
+                 |
+                 \Z           # end of string
+                )""",
+            re.X | re.S,
+        )
+
+        if match:
+            text = match.group(1)
+            if text:
+                self.append_node(parsetree.Text, text)
+            return True
+        else:
+            return False
+
+    def match_python_block(self):
+        match = self.match(r"<%(!)?")
+        if match:
+            line, pos = self.matched_lineno, self.matched_charpos
+            text, end = self.parse_until_text(False, r"%>")
+            # the trailing newline helps
+            # compiler.parse() not complain about indentation
+            text = adjust_whitespace(text) + "\n"
+            self.append_node(
+                parsetree.Code,
+                text,
+                match.group(1) == "!",
+                lineno=line,
+                pos=pos,
+            )
+            return True
+        else:
+            return False
+
+    def match_expression(self):
+        match = self.match(r"\${")
+        if not match:
+            return False
+
+        line, pos = self.matched_lineno, self.matched_charpos
+        text, end = self.parse_until_text(True, r"\|", r"}")
+        if end == "|":
+            escapes, end = self.parse_until_text(True, r"}")
+        else:
+            escapes = ""
+        text = text.replace("\r\n", "\n")
+        self.append_node(
+            parsetree.Expression,
+            text,
+            escapes.strip(),
+            lineno=line,
+            pos=pos,
+        )
+        return True
+
+    def match_control_line(self):
+        match = self.match(
+            r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\\r?\n)|[^\r\n])*)"
+            r"(?:\r?\n|\Z)",
+            re.M,
+        )
+        if not match:
+            return False
+
+        operator = match.group(1)
+        text = match.group(2)
+        if operator == "%":
+            m2 = re.match(r"(end)?(\w+)\s*(.*)", text)
+            if not m2:
+                raise exceptions.SyntaxException(
+                    "Invalid control line: '%s'" % text,
+                    **self.exception_kwargs,
+                )
+            isend, keyword = m2.group(1, 2)
+            isend = isend is not None
+
+            if isend:
+                if not len(self.control_line):
+                    raise exceptions.SyntaxException(
+                        "No starting keyword '%s' for '%s'" % (keyword, text),
+                        **self.exception_kwargs,
+                    )
+                elif self.control_line[-1].keyword != keyword:
+                    raise exceptions.SyntaxException(
+                        "Keyword '%s' doesn't match keyword '%s'"
+                        % (text, self.control_line[-1].keyword),
+                        **self.exception_kwargs,
+                    )
+            self.append_node(parsetree.ControlLine, keyword, isend, text)
+        else:
+            self.append_node(parsetree.Comment, text)
+        return True
+
+    def match_comment(self):
+        """matches the multiline version of a comment"""
+        match = self.match(r"<%doc>(.*?)</%doc>", re.S)
+        if match:
+            self.append_node(parsetree.Comment, match.group(1))
+            return True
+        else:
+            return False
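
A small sketch of the Lexer in isolation: parse a template string and walk
the resulting parse tree (the template text is illustrative):

    from mako.lexer import Lexer

    tree = Lexer("Hello, ${name}!\n% if extra:\nmore\n% endif\n").parse()
    # Each parsetree node type defines a descriptive __repr__
    # (Text, Expression, ControlLine, ...).
    for node in tree.get_children():
        print(node)
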
diff --git a/venv/Lib/site-packages/mako/lookup.py b/venv/Lib/site-packages/mako/lookup.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea1aec60073786ca8505ffd634c1963081b05513
--- /dev/null
+++ b/venv/Lib/site-packages/mako/lookup.py
@@ -0,0 +1,361 @@
+# mako/lookup.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import os
+import posixpath
+import re
+import stat
+import threading
+
+from mako import exceptions
+from mako import util
+from mako.template import Template
+
+
+class TemplateCollection:
+
+    """Represent a collection of :class:`.Template` objects,
+    identifiable via URI.
+
+    A :class:`.TemplateCollection` is linked to the usage of
+    all template tags that address other templates, such
+    as ``<%include>``, ``<%namespace>``, and ``<%inherit>``.
+    The ``file`` attribute of each of those tags refers
+    to a string URI that is passed to that :class:`.Template`
+    object's :class:`.TemplateCollection` for resolution.
+
+    :class:`.TemplateCollection` is an abstract class,
+    with the usual default implementation being :class:`.TemplateLookup`.
+
+    """
+
+    def has_template(self, uri):
+        """Return ``True`` if this :class:`.TemplateLookup` is
+        capable of returning a :class:`.Template` object for the
+        given ``uri``.
+
+        :param uri: String URI of the template to be resolved.
+
+        """
+        try:
+            self.get_template(uri)
+            return True
+        except exceptions.TemplateLookupException:
+            return False
+
+    def get_template(self, uri, relativeto=None):
+        """Return a :class:`.Template` object corresponding to the given
+        ``uri``.
+
+        The default implementation raises
+        :class:`.NotImplementedError`. Implementations should
+        raise :class:`.TemplateLookupException` if the given ``uri``
+        cannot be resolved.
+
+        :param uri: String URI of the template to be resolved.
+        :param relativeto: if present, the given ``uri`` is assumed to
+         be relative to this URI.
+
+        """
+        raise NotImplementedError()
+
+    def filename_to_uri(self, uri, filename):
+        """Convert the given ``filename`` to a URI relative to
+        this :class:`.TemplateCollection`."""
+
+        return uri
+
+    def adjust_uri(self, uri, filename):
+        """Adjust the given ``uri`` based on the calling ``filename``.
+
+        When this method is called from the runtime, the
+        ``filename`` parameter is taken directly to the ``filename``
+        attribute of the calling template. Therefore a custom
+        :class:`.TemplateCollection` subclass can place any string
+        identifier desired in the ``filename`` parameter of the
+        :class:`.Template` objects it constructs and have them come back
+        here.
+
+        """
+        return uri
+
+
+class TemplateLookup(TemplateCollection):
+
+    """Represent a collection of templates that locates template source files
+    from the local filesystem.
+
+    The primary argument is the ``directories`` argument, the list of
+    directories to search:
+
+    .. sourcecode:: python
+
+        lookup = TemplateLookup(["/path/to/templates"])
+        some_template = lookup.get_template("/index.html")
+
+    The :class:`.TemplateLookup` can also be given :class:`.Template` objects
+    programmatically using :meth:`.put_string` or :meth:`.put_template`:
+
+    .. sourcecode:: python
+
+        lookup = TemplateLookup()
+        lookup.put_string("base.html", '''
+            <html><body>${self.next()}</body></html>
+        ''')
+        lookup.put_string("hello.html", '''
+            <%include file='base.html'/>
+
+            Hello, world !
+        ''')
+
+
+    :param directories: A list of directory names which will be
+     searched for a particular template URI. The URI is appended
+     to each directory and the filesystem checked.
+
+    :param collection_size: Approximate size of the collection used
+     to store templates. If left at its default of ``-1``, the size
+     is unbounded, and a plain Python dictionary is used to
+     relate URI strings to :class:`.Template` instances.
+     Otherwise, a least-recently-used cache object is used which
+     will maintain the size of the collection approximately to
+     the number given.
+
+    :param filesystem_checks: When at its default value of ``True``,
+     each call to :meth:`.TemplateLookup.get_template()` will
+     compare the filesystem last modified time to the time in
+     which an existing :class:`.Template` object was created.
+     This allows the :class:`.TemplateLookup` to regenerate a
+     new :class:`.Template` whenever the original source has
+     been updated. Set this to ``False`` for a very minor
+     performance increase.
+
+    :param modulename_callable: A callable which, when present,
+     is passed the path of the source file as well as the
+     requested URI, and then returns the full path of the
+     generated Python module file. This is used to inject
+     alternate schemes for Python module location. If left at
+     its default of ``None``, the built in system of generation
+     based on ``module_directory`` plus ``uri`` is used.
+
+    All other keyword parameters available for
+    :class:`.Template` are mirrored here. When new
+    :class:`.Template` objects are created, the keywords
+    established with this :class:`.TemplateLookup` are passed on
+    to each new :class:`.Template`.
+
+    """
+
+    def __init__(
+        self,
+        directories=None,
+        module_directory=None,
+        filesystem_checks=True,
+        collection_size=-1,
+        format_exceptions=False,
+        error_handler=None,
+        output_encoding=None,
+        encoding_errors="strict",
+        cache_args=None,
+        cache_impl="beaker",
+        cache_enabled=True,
+        cache_type=None,
+        cache_dir=None,
+        cache_url=None,
+        modulename_callable=None,
+        module_writer=None,
+        default_filters=None,
+        buffer_filters=(),
+        strict_undefined=False,
+        imports=None,
+        future_imports=None,
+        enable_loop=True,
+        input_encoding=None,
+        preprocessor=None,
+        lexer_cls=None,
+        include_error_handler=None,
+    ):
+        self.directories = [
+            posixpath.normpath(d) for d in util.to_list(directories, ())
+        ]
+        self.module_directory = module_directory
+        self.modulename_callable = modulename_callable
+        self.filesystem_checks = filesystem_checks
+        self.collection_size = collection_size
+
+        if cache_args is None:
+            cache_args = {}
+        # transfer deprecated cache_* args
+        if cache_dir:
+            cache_args.setdefault("dir", cache_dir)
+        if cache_url:
+            cache_args.setdefault("url", cache_url)
+        if cache_type:
+            cache_args.setdefault("type", cache_type)
+
+        self.template_args = {
+            "format_exceptions": format_exceptions,
+            "error_handler": error_handler,
+            "include_error_handler": include_error_handler,
+            "output_encoding": output_encoding,
+            "cache_impl": cache_impl,
+            "encoding_errors": encoding_errors,
+            "input_encoding": input_encoding,
+            "module_directory": module_directory,
+            "module_writer": module_writer,
+            "cache_args": cache_args,
+            "cache_enabled": cache_enabled,
+            "default_filters": default_filters,
+            "buffer_filters": buffer_filters,
+            "strict_undefined": strict_undefined,
+            "imports": imports,
+            "future_imports": future_imports,
+            "enable_loop": enable_loop,
+            "preprocessor": preprocessor,
+            "lexer_cls": lexer_cls,
+        }
+
+        if collection_size == -1:
+            self._collection = {}
+            self._uri_cache = {}
+        else:
+            self._collection = util.LRUCache(collection_size)
+            self._uri_cache = util.LRUCache(collection_size)
+        self._mutex = threading.Lock()
+
+    def get_template(self, uri):
+        """Return a :class:`.Template` object corresponding to the given
+        ``uri``.
+
+        .. note:: The ``relativeto`` argument is not supported here at
+           the moment.
+
+        """
+
+        try:
+            if self.filesystem_checks:
+                return self._check(uri, self._collection[uri])
+            else:
+                return self._collection[uri]
+        except KeyError as e:
+            u = re.sub(r"^\/+", "", uri)
+            for dir_ in self.directories:
+                # make sure the path separators are posix - os.altsep is
+                # empty on POSIX and cannot be used.
+                dir_ = dir_.replace(os.path.sep, posixpath.sep)
+                srcfile = posixpath.normpath(posixpath.join(dir_, u))
+                if os.path.isfile(srcfile):
+                    return self._load(srcfile, uri)
+            else:
+                raise exceptions.TopLevelLookupException(
+                    "Can't locate template for uri %r" % uri
+                ) from e
+
+    def adjust_uri(self, uri, relativeto):
+        """Adjust the given ``uri`` based on the given relative URI."""
+
+        key = (uri, relativeto)
+        if key in self._uri_cache:
+            return self._uri_cache[key]
+
+        if uri[0] == "/":
+            v = self._uri_cache[key] = uri
+        elif relativeto is not None:
+            v = self._uri_cache[key] = posixpath.join(
+                posixpath.dirname(relativeto), uri
+            )
+        else:
+            v = self._uri_cache[key] = "/" + uri
+        return v
+
+    def filename_to_uri(self, filename):
+        """Convert the given ``filename`` to a URI relative to
+        this :class:`.TemplateCollection`."""
+
+        try:
+            return self._uri_cache[filename]
+        except KeyError:
+            value = self._relativeize(filename)
+            self._uri_cache[filename] = value
+            return value
+
+    def _relativeize(self, filename):
+        """Return the portion of a filename that is 'relative'
+        to the directories in this lookup.
+
+        """
+
+        filename = posixpath.normpath(filename)
+        for dir_ in self.directories:
+            if filename[0 : len(dir_)] == dir_:
+                return filename[len(dir_) :]
+        else:
+            return None
+
+    def _load(self, filename, uri):
+        self._mutex.acquire()
+        try:
+            try:
+                # try returning from the collection one more time in
+                # case a concurrent thread already loaded it
+                return self._collection[uri]
+            except KeyError:
+                pass
+            try:
+                if self.modulename_callable is not None:
+                    module_filename = self.modulename_callable(filename, uri)
+                else:
+                    module_filename = None
+                self._collection[uri] = template = Template(
+                    uri=uri,
+                    filename=posixpath.normpath(filename),
+                    lookup=self,
+                    module_filename=module_filename,
+                    **self.template_args,
+                )
+                return template
+            except:
+                # if compilation fails etc, ensure
+                # template is removed from collection,
+                # re-raise
+                self._collection.pop(uri, None)
+                raise
+        finally:
+            self._mutex.release()
+
+    def _check(self, uri, template):
+        if template.filename is None:
+            return template
+
+        try:
+            template_stat = os.stat(template.filename)
+            if template.module._modified_time >= template_stat[stat.ST_MTIME]:
+                return template
+            self._collection.pop(uri, None)
+            return self._load(template.filename, uri)
+        except OSError as e:
+            self._collection.pop(uri, None)
+            raise exceptions.TemplateLookupException(
+                "Can't locate template for uri %r" % uri
+            ) from e
+
+    def put_string(self, uri, text):
+        """Place a new :class:`.Template` object into this
+        :class:`.TemplateLookup`, based on the given string of
+        ``text``.
+
+        """
+        self._collection[uri] = Template(
+            text, lookup=self, uri=uri, **self.template_args
+        )
+
+    def put_template(self, uri, template):
+        """Place a new :class:`.Template` object into this
+        :class:`.TemplateLookup`, based on the given
+        :class:`.Template` object.
+
+        """
+        self._collection[uri] = template
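
A minimal sketch of an in-memory lookup built with put_string, plus URI
adjustment relative to a calling template (the URIs are illustrative):

    from mako.lookup import TemplateLookup

    lookup = TemplateLookup()
    lookup.put_string("/base.html", "header|${self.body()}|footer")
    lookup.put_string("/page.html", "<%inherit file='base.html'/>content")

    print(lookup.get_template("/page.html").render())
    # -> header|content|footer
    print(lookup.adjust_uri("base.html", "/sub/page.html"))
    # -> /sub/base.html
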
diff --git a/venv/Lib/site-packages/mako/parsetree.py b/venv/Lib/site-packages/mako/parsetree.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d550b213137ec70ffd8f5a814bbb9ff81595c2c
--- /dev/null
+++ b/venv/Lib/site-packages/mako/parsetree.py
@@ -0,0 +1,656 @@
+# mako/parsetree.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""defines the parse tree components for Mako templates."""
+
+import re
+
+from mako import ast
+from mako import exceptions
+from mako import filters
+from mako import util
+
+
+class Node:
+
+    """base class for a Node in the parse tree."""
+
+    def __init__(self, source, lineno, pos, filename):
+        self.source = source
+        self.lineno = lineno
+        self.pos = pos
+        self.filename = filename
+
+    @property
+    def exception_kwargs(self):
+        return {
+            "source": self.source,
+            "lineno": self.lineno,
+            "pos": self.pos,
+            "filename": self.filename,
+        }
+
+    def get_children(self):
+        return []
+
+    def accept_visitor(self, visitor):
+        def traverse(node):
+            for n in node.get_children():
+                n.accept_visitor(visitor)
+
+        method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
+        method(self)
+
+
+class TemplateNode(Node):
+
+    """a 'container' node that stores the overall collection of nodes."""
+
+    def __init__(self, filename):
+        super().__init__("", 0, 0, filename)
+        self.nodes = []
+        self.page_attributes = {}
+
+    def get_children(self):
+        return self.nodes
+
+    def __repr__(self):
+        return "TemplateNode(%s, %r)" % (
+            util.sorted_dict_repr(self.page_attributes),
+            self.nodes,
+        )
+
+
+class ControlLine(Node):
+
+    """defines a control line, a line-oriented python line or end tag.
+
+    e.g.::
+
+        % if foo:
+            (markup)
+        % endif
+
+    """
+
+    has_loop_context = False
+
+    def __init__(self, keyword, isend, text, **kwargs):
+        super().__init__(**kwargs)
+        self.text = text
+        self.keyword = keyword
+        self.isend = isend
+        self.is_primary = keyword in ["for", "if", "while", "try", "with"]
+        self.nodes = []
+        if self.isend:
+            self._declared_identifiers = []
+            self._undeclared_identifiers = []
+        else:
+            code = ast.PythonFragment(text, **self.exception_kwargs)
+            self._declared_identifiers = code.declared_identifiers
+            self._undeclared_identifiers = code.undeclared_identifiers
+
+    def get_children(self):
+        return self.nodes
+
+    def declared_identifiers(self):
+        return self._declared_identifiers
+
+    def undeclared_identifiers(self):
+        return self._undeclared_identifiers
+
+    def is_ternary(self, keyword):
+        """return true if the given keyword is a ternary keyword
+        for this ControlLine"""
+
+        cases = {
+            "if": {"else", "elif"},
+            "try": {"except", "finally"},
+            "for": {"else"},
+        }
+
+        return keyword in cases.get(self.keyword, set())
+
+    def __repr__(self):
+        return "ControlLine(%r, %r, %r, %r)" % (
+            self.keyword,
+            self.text,
+            self.isend,
+            (self.lineno, self.pos),
+        )
+
+
+class Text(Node):
+    """defines plain text in the template."""
+
+    def __init__(self, content, **kwargs):
+        super().__init__(**kwargs)
+        self.content = content
+
+    def __repr__(self):
+        return "Text(%r, %r)" % (self.content, (self.lineno, self.pos))
+
+
+class Code(Node):
+    """defines a Python code block, either inline or module level.
+
+    e.g.::
+
+        inline:
+        <%
+            x = 12
+        %>
+
+        module level:
+        <%!
+            import logger
+        %>
+
+    """
+
+    def __init__(self, text, ismodule, **kwargs):
+        super().__init__(**kwargs)
+        self.text = text
+        self.ismodule = ismodule
+        self.code = ast.PythonCode(text, **self.exception_kwargs)
+
+    def declared_identifiers(self):
+        return self.code.declared_identifiers
+
+    def undeclared_identifiers(self):
+        return self.code.undeclared_identifiers
+
+    def __repr__(self):
+        return "Code(%r, %r, %r)" % (
+            self.text,
+            self.ismodule,
+            (self.lineno, self.pos),
+        )
+
+
+class Comment(Node):
+    """defines a comment line.
+
+    # this is a comment
+
+    """
+
+    def __init__(self, text, **kwargs):
+        super().__init__(**kwargs)
+        self.text = text
+
+    def __repr__(self):
+        return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
+
+
+class Expression(Node):
+    """defines an inline expression.
+
+    ${x+y}
+
+    """
+
+    def __init__(self, text, escapes, **kwargs):
+        super().__init__(**kwargs)
+        self.text = text
+        self.escapes = escapes
+        self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
+        self.code = ast.PythonCode(text, **self.exception_kwargs)
+
+    def declared_identifiers(self):
+        return []
+
+    def undeclared_identifiers(self):
+        # TODO: make the "filter" shortcut list configurable at parse/gen time
+        return self.code.undeclared_identifiers.union(
+            self.escapes_code.undeclared_identifiers.difference(
+                filters.DEFAULT_ESCAPES
+            )
+        ).difference(self.code.declared_identifiers)
+
+    def __repr__(self):
+        return "Expression(%r, %r, %r)" % (
+            self.text,
+            self.escapes_code.args,
+            (self.lineno, self.pos),
+        )
+
+
+class _TagMeta(type):
+    """metaclass to allow Tag to produce a subclass according to
+    its keyword"""
+
+    _classmap = {}
+
+    def __init__(cls, clsname, bases, dict_):
+        if getattr(cls, "__keyword__", None) is not None:
+            cls._classmap[cls.__keyword__] = cls
+        super().__init__(clsname, bases, dict_)
+
+    def __call__(cls, keyword, attributes, **kwargs):
+        if ":" in keyword:
+            ns, defname = keyword.split(":")
+            return type.__call__(
+                CallNamespaceTag, ns, defname, attributes, **kwargs
+            )
+
+        try:
+            cls = _TagMeta._classmap[keyword]
+        except KeyError:
+            raise exceptions.CompileException(
+                "No such tag: '%s'" % keyword,
+                source=kwargs["source"],
+                lineno=kwargs["lineno"],
+                pos=kwargs["pos"],
+                filename=kwargs["filename"],
+            )
+        return type.__call__(cls, keyword, attributes, **kwargs)
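+
+    # Illustrative sketch (not part of upstream Mako): the metaclass makes
+    # the Tag constructor polymorphic on the keyword, roughly:
+    #   Tag("include", attrs, **kw)   -> IncludeTag instance
+    #   Tag("ns:render", attrs, **kw) -> CallNamespaceTag for ns.render()
+    #   Tag("bogus", attrs, **kw)    -> raises CompileException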
+
+
+class Tag(Node, metaclass=_TagMeta):
+    """abstract base class for tags.
+
+    e.g.::
+
+        <%sometag/>
+
+        <%someothertag>
+            stuff
+        </%someothertag>
+
+    """
+
+    __keyword__ = None
+
+    def __init__(
+        self,
+        keyword,
+        attributes,
+        expressions,
+        nonexpressions,
+        required,
+        **kwargs,
+    ):
+        r"""construct a new Tag instance.
+
+        this constructor is not called directly; it is only called
+        by subclasses.
+
+        :param keyword: the tag keyword
+
+        :param attributes: raw dictionary of attribute key/value pairs
+
+        :param expressions: a set of identifiers that are legal attributes,
+         which can also contain embedded expressions
+
+        :param nonexpressions: a set of identifiers that are legal
+         attributes, which cannot contain embedded expressions
+
+        :param \**kwargs:
+         other arguments passed to the Node superclass (lineno, pos)
+
+        """
+        super().__init__(**kwargs)
+        self.keyword = keyword
+        self.attributes = attributes
+        self._parse_attributes(expressions, nonexpressions)
+        missing = [r for r in required if r not in self.parsed_attributes]
+        if len(missing):
+            raise exceptions.CompileException(
+                (
+                    "Missing attribute(s): %s"
+                    % ",".join(repr(m) for m in missing)
+                ),
+                **self.exception_kwargs,
+            )
+
+        self.parent = None
+        self.nodes = []
+
+    def is_root(self):
+        return self.parent is None
+
+    def get_children(self):
+        return self.nodes
+
+    def _parse_attributes(self, expressions, nonexpressions):
+        undeclared_identifiers = set()
+        self.parsed_attributes = {}
+        for key in self.attributes:
+            if key in expressions:
+                expr = []
+                for x in re.compile(r"(\${.+?})", re.S).split(
+                    self.attributes[key]
+                ):
+                    m = re.compile(r"^\${(.+?)}$", re.S).match(x)
+                    if m:
+                        code = ast.PythonCode(
+                            m.group(1).rstrip(), **self.exception_kwargs
+                        )
+                        # we aren't discarding "declared_identifiers" here,
+                        # as we do elsewhere so that list comprehension-
+                        # declared variables aren't counted.  No condition
+                        # that requires discarding them here has been
+                        # found yet.
+                        undeclared_identifiers = undeclared_identifiers.union(
+                            code.undeclared_identifiers
+                        )
+                        expr.append("(%s)" % m.group(1))
+                    elif x:
+                        expr.append(repr(x))
+                self.parsed_attributes[key] = " + ".join(expr) or repr("")
+            elif key in nonexpressions:
+                if re.search(r"\${.+?}", self.attributes[key]):
+                    raise exceptions.CompileException(
+                        "Attribute '%s' in tag '%s' does not allow embedded "
+                        "expressions" % (key, self.keyword),
+                        **self.exception_kwargs,
+                    )
+                self.parsed_attributes[key] = repr(self.attributes[key])
+            else:
+                raise exceptions.CompileException(
+                    "Invalid attribute for tag '%s': '%s'"
+                    % (self.keyword, key),
+                    **self.exception_kwargs,
+                )
+        self.expression_undeclared_identifiers = undeclared_identifiers
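+
+        # Illustrative sketch (not part of upstream Mako): for a tag such
+        # as <%include file="${base}/header.html"/>, where "file" is an
+        # expression attribute, the value is split on ${...} sections and
+        # reassembled as a Python expression, roughly:
+        #   parsed_attributes["file"] == "(base) + '/header.html'"
+        # with "base" recorded among the undeclared identifiers.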
+
+    def declared_identifiers(self):
+        return []
+
+    def undeclared_identifiers(self):
+        return self.expression_undeclared_identifiers
+
+    def __repr__(self):
+        return "%s(%r, %s, %r, %r)" % (
+            self.__class__.__name__,
+            self.keyword,
+            util.sorted_dict_repr(self.attributes),
+            (self.lineno, self.pos),
+            self.nodes,
+        )
+
+
+class IncludeTag(Tag):
+    __keyword__ = "include"
+
+    def __init__(self, keyword, attributes, **kwargs):
+        super().__init__(
+            keyword,
+            attributes,
+            ("file", "import", "args"),
+            (),
+            ("file",),
+            **kwargs,
+        )
+        self.page_args = ast.PythonCode(
+            "__DUMMY(%s)" % attributes.get("args", ""), **self.exception_kwargs
+        )
+
+    def declared_identifiers(self):
+        return []
+
+    def undeclared_identifiers(self):
+        identifiers = self.page_args.undeclared_identifiers.difference(
+            {"__DUMMY"}
+        ).difference(self.page_args.declared_identifiers)
+        return identifiers.union(super().undeclared_identifiers())
+
+
+class NamespaceTag(Tag):
+    __keyword__ = "namespace"
+
+    def __init__(self, keyword, attributes, **kwargs):
+        super().__init__(
+            keyword,
+            attributes,
+            ("file",),
+            ("name", "inheritable", "import", "module"),
+            (),
+            **kwargs,
+        )
+
+        self.name = attributes.get("name", "__anon_%s" % hex(abs(id(self))))
+        if "name" not in attributes and "import" not in attributes:
+            raise exceptions.CompileException(
+                "'name' and/or 'import' attributes are required "
+                "for <%namespace>",
+                **self.exception_kwargs,
+            )
+        if "file" in attributes and "module" in attributes:
+            raise exceptions.CompileException(
+                "<%namespace> may only have one of 'file' or 'module'",
+                **self.exception_kwargs,
+            )
+
+    def declared_identifiers(self):
+        return []
+
+
+class TextTag(Tag):
+    __keyword__ = "text"
+
+    def __init__(self, keyword, attributes, **kwargs):
+        super().__init__(keyword, attributes, (), ("filter",), (), **kwargs)
+        self.filter_args = ast.ArgumentList(
+            attributes.get("filter", ""), **self.exception_kwargs
+        )
+
+    def undeclared_identifiers(self):
+        return self.filter_args.undeclared_identifiers.difference(
+            filters.DEFAULT_ESCAPES.keys()
+        ).union(self.expression_undeclared_identifiers)
+
+
+class DefTag(Tag):
+    __keyword__ = "def"
+
+    def __init__(self, keyword, attributes, **kwargs):
+        expressions = ["buffered", "cached"] + [
+            c for c in attributes if c.startswith("cache_")
+        ]
+
+        super().__init__(
+            keyword,
+            attributes,
+            expressions,
+            ("name", "filter", "decorator"),
+            ("name",),
+            **kwargs,
+        )
+        name = attributes["name"]
+        if re.match(r"^[\w_]+$", name):
+            raise exceptions.CompileException(
+                "Missing parenthesis in %def", **self.exception_kwargs
+            )
+        self.function_decl = ast.FunctionDecl(
+            "def " + name + ":pass", **self.exception_kwargs
+        )
+        self.name = self.function_decl.funcname
+        self.decorator = attributes.get("decorator", "")
+        self.filter_args = ast.ArgumentList(
+            attributes.get("filter", ""), **self.exception_kwargs
+        )
+
+    is_anonymous = False
+    is_block = False
+
+    @property
+    def funcname(self):
+        return self.function_decl.funcname
+
+    def get_argument_expressions(self, **kw):
+        return self.function_decl.get_argument_expressions(**kw)
+
+    def declared_identifiers(self):
+        return self.function_decl.allargnames
+
+    def undeclared_identifiers(self):
+        res = []
+        for c in self.function_decl.defaults:
+            res += list(
+                ast.PythonCode(
+                    c, **self.exception_kwargs
+                ).undeclared_identifiers
+            )
+        return (
+            set(res)
+            .union(
+                self.filter_args.undeclared_identifiers.difference(
+                    filters.DEFAULT_ESCAPES.keys()
+                )
+            )
+            .union(self.expression_undeclared_identifiers)
+            .difference(self.function_decl.allargnames)
+        )
+
+
+class BlockTag(Tag):
+    __keyword__ = "block"
+
+    def __init__(self, keyword, attributes, **kwargs):
+        expressions = ["buffered", "cached", "args"] + [
+            c for c in attributes if c.startswith("cache_")
+        ]
+
+        super().__init__(
+            keyword,
+            attributes,
+            expressions,
+            ("name", "filter", "decorator"),
+            (),
+            **kwargs,
+        )
+        name = attributes.get("name")
+        if name and not re.match(r"^[\w_]+$", name):
+            raise exceptions.CompileException(
+                "%block may not specify an argument signature",
+                **self.exception_kwargs,
+            )
+        if not name and attributes.get("args", None):
+            raise exceptions.CompileException(
+                "Only named %blocks may specify args", **self.exception_kwargs
+            )
+        self.body_decl = ast.FunctionArgs(
+            attributes.get("args", ""), **self.exception_kwargs
+        )
+
+        self.name = name
+        self.decorator = attributes.get("decorator", "")
+        self.filter_args = ast.ArgumentList(
+            attributes.get("filter", ""), **self.exception_kwargs
+        )
+
+    is_block = True
+
+    @property
+    def is_anonymous(self):
+        return self.name is None
+
+    @property
+    def funcname(self):
+        return self.name or "__M_anon_%d" % (self.lineno,)
+
+    def get_argument_expressions(self, **kw):
+        return self.body_decl.get_argument_expressions(**kw)
+
+    def declared_identifiers(self):
+        return self.body_decl.allargnames
+
+    def undeclared_identifiers(self):
+        return (
+            self.filter_args.undeclared_identifiers.difference(
+                filters.DEFAULT_ESCAPES.keys()
+            )
+        ).union(self.expression_undeclared_identifiers)
+
+
+class CallTag(Tag):
+    __keyword__ = "call"
+
+    def __init__(self, keyword, attributes, **kwargs):
+        super().__init__(
+            keyword, attributes, ("args"), ("expr",), ("expr",), **kwargs
+        )
+        self.expression = attributes["expr"]
+        self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
+        self.body_decl = ast.FunctionArgs(
+            attributes.get("args", ""), **self.exception_kwargs
+        )
+
+    def declared_identifiers(self):
+        return self.code.declared_identifiers.union(self.body_decl.allargnames)
+
+    def undeclared_identifiers(self):
+        return self.code.undeclared_identifiers.difference(
+            self.code.declared_identifiers
+        )
+
+
+class CallNamespaceTag(Tag):
+    def __init__(self, namespace, defname, attributes, **kwargs):
+        super().__init__(
+            namespace + ":" + defname,
+            attributes,
+            tuple(attributes.keys()) + ("args",),
+            (),
+            (),
+            **kwargs,
+        )
+
+        self.expression = "%s.%s(%s)" % (
+            namespace,
+            defname,
+            ",".join(
+                "%s=%s" % (k, v)
+                for k, v in self.parsed_attributes.items()
+                if k != "args"
+            ),
+        )
+
+        self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
+        self.body_decl = ast.FunctionArgs(
+            attributes.get("args", ""), **self.exception_kwargs
+        )
+
+    def declared_identifiers(self):
+        return self.code.declared_identifiers.union(self.body_decl.allargnames)
+
+    def undeclared_identifiers(self):
+        return self.code.undeclared_identifiers.difference(
+            self.code.declared_identifiers
+        )
+
+
+class InheritTag(Tag):
+    __keyword__ = "inherit"
+
+    def __init__(self, keyword, attributes, **kwargs):
+        super().__init__(
+            keyword, attributes, ("file",), (), ("file",), **kwargs
+        )
+
+
+class PageTag(Tag):
+    __keyword__ = "page"
+
+    def __init__(self, keyword, attributes, **kwargs):
+        expressions = [
+            "cached",
+            "args",
+            "expression_filter",
+            "enable_loop",
+        ] + [c for c in attributes if c.startswith("cache_")]
+
+        super().__init__(keyword, attributes, expressions, (), (), **kwargs)
+        self.body_decl = ast.FunctionArgs(
+            attributes.get("args", ""), **self.exception_kwargs
+        )
+        self.filter_args = ast.ArgumentList(
+            attributes.get("expression_filter", ""), **self.exception_kwargs
+        )
+
+    def declared_identifiers(self):
+        return self.body_decl.allargnames
diff --git a/venv/Lib/site-packages/mako/pygen.py b/venv/Lib/site-packages/mako/pygen.py
new file mode 100644
index 0000000000000000000000000000000000000000..baeb93a9617383448803767cd6f01a67c9df1922
--- /dev/null
+++ b/venv/Lib/site-packages/mako/pygen.py
@@ -0,0 +1,309 @@
+# mako/pygen.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""utilities for generating and formatting literal Python code."""
+
+import re
+
+from mako import exceptions
+
+
+class PythonPrinter:
+    def __init__(self, stream):
+        # indentation counter
+        self.indent = 0
+
+        # a stack storing information about why we incremented
+        # the indentation counter, to help us determine if we
+        # should decrement it
+        self.indent_detail = []
+
+        # the string of whitespace multiplied by the indent
+        # counter to produce a line
+        self.indentstring = "    "
+
+        # the stream we are writing to
+        self.stream = stream
+
+        # current line number
+        self.lineno = 1
+
+        # a list of lines that represents a buffered "block" of code,
+        # which can be later printed relative to an indent level
+        self.line_buffer = []
+
+        self.in_indent_lines = False
+
+        self._reset_multi_line_flags()
+
+        # mapping of generated python lines to template
+        # source lines
+        self.source_map = {}
+
+        self._re_space_comment = re.compile(r"^\s*#")
+        self._re_space = re.compile(r"^\s*$")
+        self._re_indent = re.compile(r":[ \t]*(?:#.*)?$")
+        self._re_compound = re.compile(r"^\s*(if|try|elif|while|for|with)")
+        self._re_indent_keyword = re.compile(
+            r"^\s*(def|class|else|elif|except|finally)"
+        )
+        self._re_unindentor = re.compile(r"^\s*(else|elif|except|finally).*\:")
+
+    def _update_lineno(self, num):
+        self.lineno += num
+
+    def start_source(self, lineno):
+        if self.lineno not in self.source_map:
+            self.source_map[self.lineno] = lineno
+
+    def write_blanks(self, num):
+        self.stream.write("\n" * num)
+        self._update_lineno(num)
+
+    def write_indented_block(self, block, starting_lineno=None):
+        """print a line or lines of python which already contain indentation.
+
+        The indentation of the total block of lines will be adjusted to that of
+        the current indent level."""
+        self.in_indent_lines = False
+        for i, l in enumerate(re.split(r"\r?\n", block)):
+            self.line_buffer.append(l)
+            if starting_lineno is not None:
+                self.start_source(starting_lineno + i)
+            self._update_lineno(1)
+
+    def writelines(self, *lines):
+        """print a series of lines of python."""
+        for line in lines:
+            self.writeline(line)
+
+    def writeline(self, line):
+        """print a line of python, indenting it according to the current
+        indent level.
+
+        this also adjusts the indentation counter according to the
+        content of the line.
+
+        """
+
+        if not self.in_indent_lines:
+            self._flush_adjusted_lines()
+            self.in_indent_lines = True
+
+        if (
+            line is None
+            or self._re_space_comment.match(line)
+            or self._re_space.match(line)
+        ):
+            hastext = False
+        else:
+            hastext = True
+
+        is_comment = line and len(line) and line[0] == "#"
+
+        # see if this line should decrease the indentation level
+        if (
+            not is_comment
+            and (not hastext or self._is_unindentor(line))
+            and self.indent > 0
+        ):
+            self.indent -= 1
+            # if the indent_detail stack is empty, the user
+            # probably put extra closures - the resulting
+            # module won't compile.
+            if len(self.indent_detail) == 0:
+                # TODO: no coverage here
+                raise exceptions.MakoException("Too many whitespace closures")
+            self.indent_detail.pop()
+
+        if line is None:
+            return
+
+        # write the line
+        self.stream.write(self._indent_line(line) + "\n")
+        self._update_lineno(len(line.split("\n")))
+
+        # see if this line should increase the indentation level.
+        # note that a line can both decrease (before printing) and
+        # then increase (after printing) the indentation level.
+
+        if self._re_indent.search(line):
+            # increment indentation count, and also
+            # keep track of what the keyword was that indented us,
+            # if it is a python compound statement keyword
+            # where we might have to look for an "unindent" keyword
+            match = self._re_compound.match(line)
+            if match:
+                # its a "compound" keyword, so we will check for "unindentors"
+                indentor = match.group(1)
+                self.indent += 1
+                self.indent_detail.append(indentor)
+            else:
+                indentor = None
+                # its not a "compound" keyword.  but lets also
+                # test for valid Python keywords that might be indenting us,
+                # else assume its a non-indenting line
+                m2 = self._re_indent_keyword.match(line)
+                if m2:
+                    self.indent += 1
+                    self.indent_detail.append(indentor)
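+
+    # Illustrative sketch (not part of upstream Mako), assuming an
+    # in-memory stream:
+    #   import io
+    #   printer = PythonPrinter(io.StringIO())
+    #   printer.writeline("if x:")  # compound keyword; indent goes 0 -> 1
+    #   printer.writeline("y = 1")  # written with one level of indent
+    #   printer.writeline(None)     # "blank" line; indent drops 1 -> 0
+    #   printer.close()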
+
+    def close(self):
+        """close this printer, flushing any remaining lines."""
+        self._flush_adjusted_lines()
+
+    def _is_unindentor(self, line):
+        """return true if the given line is an 'unindentor',
+        relative to the last 'indent' event received.
+
+        """
+
+        # no indentation detail has been pushed on; return False
+        if len(self.indent_detail) == 0:
+            return False
+
+        indentor = self.indent_detail[-1]
+
+        # the last indent keyword we grabbed is not a
+        # compound statement keyword; return False
+        if indentor is None:
+            return False
+
+        # if the current line doesn't have one of the "unindentor" keywords,
+        # return False
+        match = self._re_unindentor.match(line)
+        # if it matched: the whitespace lines up, we have a compound
+        # indentor, and this line has an unindentor, which is probably
+        # good enough
+        return bool(match)
+
+        # should we decide that this is not good enough, here's
+        # more to check.
+        # keyword = match.group(1)
+
+        # match the original indent keyword
+        # for crit in [
+        #   (r'if|elif', r'else|elif'),
+        #   (r'try', r'except|finally|else'),
+        #   (r'while|for', r'else'),
+        # ]:
+        #   if re.match(crit[0], indentor) and re.match(crit[1], keyword):
+        #        return True
+
+        # return False
+
+    def _indent_line(self, line, stripspace=""):
+        """indent the given line according to the current indent level.
+
+        stripspace is a string of space that will be truncated from the
+        start of the line before indenting."""
+        if stripspace == "":
+            # Fast path optimization.
+            return self.indentstring * self.indent + line
+
+        return re.sub(
+            r"^%s" % stripspace, self.indentstring * self.indent, line
+        )
+
+    def _reset_multi_line_flags(self):
+        """reset the flags which would indicate we are in a backslashed
+        or triple-quoted section."""
+
+        self.backslashed, self.triplequoted = False, False
+
+    def _in_multi_line(self, line):
+        """return true if the given line is part of a multi-line block,
+        via backslash or triple-quote."""
+
+        # we are only looking for explicitly joined lines here, not
+        # implicit ones (i.e. brackets, braces etc.).  this is just to
+        # guard against the possibility of modifying the space inside of
+        # a literal multiline string with unfortunately placed
+        # whitespace
+
+        current_state = self.backslashed or self.triplequoted
+
+        self.backslashed = bool(re.search(r"\\$", line))
+        triples = len(re.findall(r"\"\"\"|\'\'\'", line))
+        if triples == 1 or triples % 2 != 0:
+            self.triplequoted = not self.triplequoted
+
+        return current_state
+
+    def _flush_adjusted_lines(self):
+        stripspace = None
+        self._reset_multi_line_flags()
+
+        for entry in self.line_buffer:
+            if self._in_multi_line(entry):
+                self.stream.write(entry + "\n")
+            else:
+                entry = entry.expandtabs()
+                if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
+                    stripspace = re.match(r"^([ \t]*)", entry).group(1)
+                self.stream.write(self._indent_line(entry, stripspace) + "\n")
+
+        self.line_buffer = []
+        self._reset_multi_line_flags()
+
+
+def adjust_whitespace(text):
+    """remove the left-whitespace margin of a block of Python code."""
+
+    state = [False, False]
+    (backslashed, triplequoted) = (0, 1)
+
+    def in_multi_line(line):
+        start_state = state[backslashed] or state[triplequoted]
+
+        if re.search(r"\\$", line):
+            state[backslashed] = True
+        else:
+            state[backslashed] = False
+
+        def match(reg, t):
+            m = re.match(reg, t)
+            if m:
+                return m, t[len(m.group(0)) :]
+            else:
+                return None, t
+
+        while line:
+            if state[triplequoted]:
+                m, line = match(r"%s" % state[triplequoted], line)
+                if m:
+                    state[triplequoted] = False
+                else:
+                    m, line = match(r".*?(?=%s|$)" % state[triplequoted], line)
+            else:
+                m, line = match(r"#", line)
+                if m:
+                    return start_state
+
+                m, line = match(r"\"\"\"|\'\'\'", line)
+                if m:
+                    state[triplequoted] = m.group(0)
+                    continue
+
+                m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
+
+        return start_state
+
+    def _indent_line(line, stripspace=""):
+        return re.sub(r"^%s" % stripspace, "", line)
+
+    lines = []
+    stripspace = None
+
+    for line in re.split(r"\r?\n", text):
+        if in_multi_line(line):
+            lines.append(line)
+        else:
+            line = line.expandtabs()
+            if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
+                stripspace = re.match(r"^([ \t]*)", line).group(1)
+            lines.append(_indent_line(line, stripspace))
+    return "\n".join(lines)
diff --git a/venv/Lib/site-packages/mako/pyparser.py b/venv/Lib/site-packages/mako/pyparser.py
new file mode 100644
index 0000000000000000000000000000000000000000..68218a0f0fa330628e338c01c3a6f30a382bc30a
--- /dev/null
+++ b/venv/Lib/site-packages/mako/pyparser.py
@@ -0,0 +1,217 @@
+# mako/pyparser.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Handles parsing of Python code.
+
+Parsing to AST is done via the _ast module.
+"""
+
+import operator
+
+import _ast
+
+from mako import _ast_util
+from mako import compat
+from mako import exceptions
+from mako import util
+
+# words that cannot be assigned to (notably
+# smaller than the total keys in __builtins__)
+reserved = {"True", "False", "None", "print"}
+
+# the "id" attribute on a function node
+arg_id = operator.attrgetter("arg")
+
+util.restore__ast(_ast)
+
+
+def parse(code, mode="exec", **exception_kwargs):
+    """Parse an expression into AST"""
+
+    try:
+        return _ast_util.parse(code, "<unknown>", mode)
+    except Exception as e:
+        raise exceptions.SyntaxException(
+            "(%s) %s (%r)"
+            % (
+                compat.exception_as().__class__.__name__,
+                compat.exception_as(),
+                code[0:50],
+            ),
+            **exception_kwargs,
+        ) from e
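+
+
+# Illustrative sketch (not part of upstream Mako; the keyword values are
+# made up): parse() wraps any SyntaxError in a Mako SyntaxException that
+# carries template position info:
+#   parse("x = 1")  -> an _ast.Module node
+#   parse("x ==", source="", lineno=3, pos=1, filename="t.mako")
+#                   -> raises exceptions.SyntaxException at line 3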
+
+
+class FindIdentifiers(_ast_util.NodeVisitor):
+    def __init__(self, listener, **exception_kwargs):
+        self.in_function = False
+        self.in_assign_targets = False
+        self.local_ident_stack = set()
+        self.listener = listener
+        self.exception_kwargs = exception_kwargs
+
+    def _add_declared(self, name):
+        if not self.in_function:
+            self.listener.declared_identifiers.add(name)
+        else:
+            self.local_ident_stack.add(name)
+
+    def visit_ClassDef(self, node):
+        self._add_declared(node.name)
+
+    def visit_Assign(self, node):
+        # flip around the visiting of Assign so the expression gets
+        # evaluated first, in the case of a clause like "x=x+5" (x
+        # is undeclared)
+
+        self.visit(node.value)
+        in_a = self.in_assign_targets
+        self.in_assign_targets = True
+        for n in node.targets:
+            self.visit(n)
+        self.in_assign_targets = in_a
+
+    def visit_ExceptHandler(self, node):
+        if node.name is not None:
+            self._add_declared(node.name)
+        if node.type is not None:
+            self.visit(node.type)
+        for statement in node.body:
+            self.visit(statement)
+
+    def visit_Lambda(self, node, *args):
+        self._visit_function(node, True)
+
+    def visit_FunctionDef(self, node):
+        self._add_declared(node.name)
+        self._visit_function(node, False)
+
+    def _expand_tuples(self, args):
+        for arg in args:
+            if isinstance(arg, _ast.Tuple):
+                yield from arg.elts
+            else:
+                yield arg
+
+    def _visit_function(self, node, islambda):
+        # push function state onto the stack.  don't log any more
+        # identifiers as "declared" until outside of the function,
+        # but keep logging identifiers as "undeclared".  track
+        # argument names in each function header so they aren't
+        # counted as "undeclared"
+
+        inf = self.in_function
+        self.in_function = True
+
+        local_ident_stack = self.local_ident_stack
+        self.local_ident_stack = local_ident_stack.union(
+            [arg_id(arg) for arg in self._expand_tuples(node.args.args)]
+        )
+        if islambda:
+            self.visit(node.body)
+        else:
+            for n in node.body:
+                self.visit(n)
+        self.in_function = inf
+        self.local_ident_stack = local_ident_stack
+
+    def visit_For(self, node):
+        # flip around visit
+
+        self.visit(node.iter)
+        self.visit(node.target)
+        for statement in node.body:
+            self.visit(statement)
+        for statement in node.orelse:
+            self.visit(statement)
+
+    def visit_Name(self, node):
+        if isinstance(node.ctx, _ast.Store):
+            # this is equivalent to visit_AssName in
+            # compiler
+            self._add_declared(node.id)
+        elif (
+            node.id not in reserved
+            and node.id not in self.listener.declared_identifiers
+            and node.id not in self.local_ident_stack
+        ):
+            self.listener.undeclared_identifiers.add(node.id)
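+
+    # Illustrative sketch (not part of upstream Mako): because
+    # visit_Assign visits the value before the targets, a clause like
+    # "x = x + 5" first records "x" as undeclared (the read on the
+    # right-hand side) and only then as declared (the store on the left).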
+
+    def visit_Import(self, node):
+        for name in node.names:
+            if name.asname is not None:
+                self._add_declared(name.asname)
+            else:
+                self._add_declared(name.name.split(".")[0])
+
+    def visit_ImportFrom(self, node):
+        for name in node.names:
+            if name.asname is not None:
+                self._add_declared(name.asname)
+            elif name.name == "*":
+                raise exceptions.CompileException(
+                    "'import *' is not supported, since all identifier "
+                    "names must be explicitly declared.  Please use the "
+                    "form 'from <modulename> import <name1>, <name2>, "
+                    "...' instead.",
+                    **self.exception_kwargs,
+                )
+            else:
+                self._add_declared(name.name)
+
+
+class FindTuple(_ast_util.NodeVisitor):
+    def __init__(self, listener, code_factory, **exception_kwargs):
+        self.listener = listener
+        self.exception_kwargs = exception_kwargs
+        self.code_factory = code_factory
+
+    def visit_Tuple(self, node):
+        for n in node.elts:
+            p = self.code_factory(n, **self.exception_kwargs)
+            self.listener.codeargs.append(p)
+            self.listener.args.append(ExpressionGenerator(n).value())
+            ldi = self.listener.declared_identifiers
+            self.listener.declared_identifiers = ldi.union(
+                p.declared_identifiers
+            )
+            lui = self.listener.undeclared_identifiers
+            self.listener.undeclared_identifiers = lui.union(
+                p.undeclared_identifiers
+            )
+
+
+class ParseFunc(_ast_util.NodeVisitor):
+    def __init__(self, listener, **exception_kwargs):
+        self.listener = listener
+        self.exception_kwargs = exception_kwargs
+
+    def visit_FunctionDef(self, node):
+        self.listener.funcname = node.name
+
+        argnames = [arg_id(arg) for arg in node.args.args]
+        if node.args.vararg:
+            argnames.append(node.args.vararg.arg)
+
+        kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs]
+        if node.args.kwarg:
+            kwargnames.append(node.args.kwarg.arg)
+        self.listener.argnames = argnames
+        self.listener.defaults = node.args.defaults  # ast
+        self.listener.kwargnames = kwargnames
+        self.listener.kwdefaults = node.args.kw_defaults
+        self.listener.varargs = node.args.vararg
+        self.listener.kwargs = node.args.kwarg
+
+
+class ExpressionGenerator:
+    def __init__(self, astnode):
+        self.generator = _ast_util.SourceGenerator(" " * 4)
+        self.generator.visit(astnode)
+
+    def value(self):
+        return "".join(self.generator.result)
diff --git a/venv/Lib/site-packages/mako/runtime.py b/venv/Lib/site-packages/mako/runtime.py
new file mode 100644
index 0000000000000000000000000000000000000000..23401b70c8917fdab24bfa050038e647af7d5c16
--- /dev/null
+++ b/venv/Lib/site-packages/mako/runtime.py
@@ -0,0 +1,968 @@
+# mako/runtime.py
+# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""provides runtime services for templates, including Context,
+Namespace, and various helper functions."""
+
+import builtins
+import functools
+import sys
+
+from mako import compat
+from mako import exceptions
+from mako import util
+
+
+class Context:
+
+    """Provides runtime namespace, output buffer, and various
+    callstacks for templates.
+
+    See :ref:`runtime_toplevel` for detail on the usage of
+    :class:`.Context`.
+
+    """
+
+    def __init__(self, buffer, **data):
+        self._buffer_stack = [buffer]
+
+        self._data = data
+
+        self._kwargs = data.copy()
+        self._with_template = None
+        self._outputting_as_unicode = None
+        self.namespaces = {}
+
+        # "capture" function which proxies to the
+        # generic "capture" function
+        self._data["capture"] = functools.partial(capture, self)
+
+        # "caller" stack used by def calls with content
+        self.caller_stack = self._data["caller"] = CallerStack()
+
+    def _set_with_template(self, t):
+        self._with_template = t
+        illegal_names = t.reserved_names.intersection(self._data)
+        if illegal_names:
+            raise exceptions.NameConflictError(
+                "Reserved words passed to render(): %s"
+                % ", ".join(illegal_names)
+            )
+
+    @property
+    def lookup(self):
+        """Return the :class:`.TemplateLookup` associated
+        with this :class:`.Context`.
+
+        """
+        return self._with_template.lookup
+
+    @property
+    def kwargs(self):
+        """Return the dictionary of top level keyword arguments associated
+        with this :class:`.Context`.
+
+        This dictionary only includes the top-level arguments passed to
+        :meth:`.Template.render`.  It does not include names produced within
+        the template execution such as local variable names or special names
+        such as ``self``, ``next``, etc.
+
+        The purpose of this dictionary is primarily for the case that
+        a :class:`.Template` accepts arguments via its ``<%page>`` tag,
+        which are normally expected to be passed via :meth:`.Template.render`,
+        except the template is being called in an inheritance context,
+        using the ``body()`` method.   :attr:`.Context.kwargs` can then be
+        used to propagate these arguments to the inheriting template::
+
+            ${next.body(**context.kwargs)}
+
+        """
+        return self._kwargs.copy()
+
+    def push_caller(self, caller):
+        """Push a ``caller`` callable onto the callstack for
+        this :class:`.Context`."""
+
+        self.caller_stack.append(caller)
+
+    def pop_caller(self):
+        """Pop a ``caller`` callable onto the callstack for this
+        :class:`.Context`."""
+
+        del self.caller_stack[-1]
+
+    def keys(self):
+        """Return a list of all names established in this :class:`.Context`."""
+
+        return list(self._data.keys())
+
+    def __getitem__(self, key):
+        if key in self._data:
+            return self._data[key]
+        else:
+            return builtins.__dict__[key]
+
+    def _push_writer(self):
+        """push a capturing buffer onto this Context and return
+        the new writer function."""
+
+        buf = util.FastEncodingBuffer()
+        self._buffer_stack.append(buf)
+        return buf.write
+
+    def _pop_buffer_and_writer(self):
+        """pop the most recent capturing buffer from this Context
+        and return the current writer after the pop.
+
+        """
+
+        buf = self._buffer_stack.pop()
+        return buf, self._buffer_stack[-1].write
+
+    def _push_buffer(self):
+        """push a capturing buffer onto this Context."""
+
+        self._push_writer()
+
+    def _pop_buffer(self):
+        """pop the most recent capturing buffer from this Context."""
+
+        return self._buffer_stack.pop()
+
+    def get(self, key, default=None):
+        """Return a value from this :class:`.Context`."""
+
+        return self._data.get(key, builtins.__dict__.get(key, default))
+
+    def write(self, string):
+        """Write a string to this :class:`.Context` object's
+        underlying output buffer."""
+
+        self._buffer_stack[-1].write(string)
+
+    def writer(self):
+        """Return the current writer function."""
+
+        return self._buffer_stack[-1].write
+
+    def _copy(self):
+        c = Context.__new__(Context)
+        c._buffer_stack = self._buffer_stack
+        c._data = self._data.copy()
+        c._kwargs = self._kwargs
+        c._with_template = self._with_template
+        c._outputting_as_unicode = self._outputting_as_unicode
+        c.namespaces = self.namespaces
+        c.caller_stack = self.caller_stack
+        return c
+
+    def _locals(self, d):
+        """Create a new :class:`.Context` with a copy of this
+        :class:`.Context`'s current state,
+        updated with the given dictionary.
+
+        The :attr:`.Context.kwargs` collection remains
+        unaffected.
+
+
+        """
+
+        if not d:
+            return self
+        c = self._copy()
+        c._data.update(d)
+        return c
+
+    def _clean_inheritance_tokens(self):
+        """create a new copy of this :class:`.Context`. with
+        tokens related to inheritance state removed."""
+
+        c = self._copy()
+        x = c._data
+        x.pop("self", None)
+        x.pop("parent", None)
+        x.pop("next", None)
+        return c
+
+
+class CallerStack(list):
+    def __init__(self):
+        self.nextcaller = None
+
+    def __nonzero__(self):
+        return self.__bool__()
+
+    def __bool__(self):
+        return bool(len(self) and self._get_caller())
+
+    def _get_caller(self):
+        # this method can be removed once
+        # codegen MAGIC_NUMBER moves past 7
+        return self[-1]
+
+    def __getattr__(self, key):
+        return getattr(self._get_caller(), key)
+
+    def _push_frame(self):
+        frame = self.nextcaller or None
+        self.append(frame)
+        self.nextcaller = None
+        return frame
+
+    def _pop_frame(self):
+        self.nextcaller = self.pop()
+
+
+class Undefined:
+
+    """Represents an undefined value in a template.
+
+    All template modules have a constant value
+    ``UNDEFINED`` present which is an instance of this
+    object.
+
+    """
+
+    def __str__(self):
+        raise NameError("Undefined")
+
+    def __nonzero__(self):
+        return self.__bool__()
+
+    def __bool__(self):
+        return False
+
+
+UNDEFINED = Undefined()
+STOP_RENDERING = ""
+
+
+class LoopStack:
+
+    """a stack for LoopContexts that implements the context manager protocol
+    to automatically pop off the top of the stack on context exit
+    """
+
+    def __init__(self):
+        self.stack = []
+
+    def _enter(self, iterable):
+        self._push(iterable)
+        return self._top
+
+    def _exit(self):
+        self._pop()
+        return self._top
+
+    @property
+    def _top(self):
+        if self.stack:
+            return self.stack[-1]
+        else:
+            return self
+
+    def _pop(self):
+        return self.stack.pop()
+
+    def _push(self, iterable):
+        new = LoopContext(iterable)
+        if self.stack:
+            new.parent = self.stack[-1]
+        return self.stack.append(new)
+
+    def __getattr__(self, key):
+        raise exceptions.RuntimeException("No loop context is established")
+
+    def __iter__(self):
+        return iter(self._top)
+
+
+class LoopContext:
+
+    """A magic loop variable.
+    Automatically accessible in any ``% for`` block.
+
+    See the section :ref:`loop_context` for usage
+    notes.
+
+    :attr:`parent` -> :class:`.LoopContext` or ``None``
+        The parent loop, if one exists.
+    :attr:`index` -> `int`
+        The 0-based iteration count.
+    :attr:`reverse_index` -> `int`
+        The number of iterations remaining.
+    :attr:`first` -> `bool`
+        ``True`` on the first iteration, ``False`` otherwise.
+    :attr:`last` -> `bool`
+        ``True`` on the last iteration, ``False`` otherwise.
+    :attr:`even` -> `bool`
+        ``True`` when ``index`` is even.
+    :attr:`odd` -> `bool`
+        ``True`` when ``index`` is odd.
+    """
+
+    def __init__(self, iterable):
+        self._iterable = iterable
+        self.index = 0
+        self.parent = None
+
+    def __iter__(self):
+        for i in self._iterable:
+            yield i
+            self.index += 1
+
+    @util.memoized_instancemethod
+    def __len__(self):
+        return len(self._iterable)
+
+    @property
+    def reverse_index(self):
+        return len(self) - self.index - 1
+
+    @property
+    def first(self):
+        return self.index == 0
+
+    @property
+    def last(self):
+        return self.index == len(self) - 1
+
+    @property
+    def even(self):
+        return not self.odd
+
+    @property
+    def odd(self):
+        return bool(self.index % 2)
+
+    def cycle(self, *values):
+        """Cycle through values as the loop progresses."""
+        if not values:
+            raise ValueError("You must provide values to cycle through")
+        return values[self.index % len(values)]
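+
+    # Illustrative sketch (not part of upstream Mako): typical template
+    # usage of the magic "loop" variable inside a % for block:
+    #   % for item in items:
+    #       <tr class="${loop.cycle('odd', 'even')}">${item}</tr>
+    #   % endfor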
+
+
+class _NSAttr:
+    def __init__(self, parent):
+        self.__parent = parent
+
+    def __getattr__(self, key):
+        ns = self.__parent
+        while ns:
+            if hasattr(ns.module, key):
+                return getattr(ns.module, key)
+            else:
+                ns = ns.inherits
+        raise AttributeError(key)
+
+
+class Namespace:
+
+    """Provides access to collections of rendering methods, which
+    can be local, from other templates, or from imported modules.
+
+    To access a particular rendering method referenced by a
+    :class:`.Namespace`, use plain attribute access:
+
+    .. sourcecode:: mako
+
+      ${some_namespace.foo(x, y, z)}
+
+    :class:`.Namespace` also contains several built-in attributes
+    described here.
+
+    """
+
+    def __init__(
+        self,
+        name,
+        context,
+        callables=None,
+        inherits=None,
+        populate_self=True,
+        calling_uri=None,
+    ):
+        self.name = name
+        self.context = context
+        self.inherits = inherits
+        if callables is not None:
+            self.callables = {c.__name__: c for c in callables}
+
+    callables = ()
+
+    module = None
+    """The Python module referenced by this :class:`.Namespace`.
+
+    If the namespace references a :class:`.Template`, then
+    this module is the equivalent of ``template.module``,
+    i.e. the generated module for the template.
+
+    """
+
+    template = None
+    """The :class:`.Template` object referenced by this
+        :class:`.Namespace`, if any.
+
+    """
+
+    context = None
+    """The :class:`.Context` object for this :class:`.Namespace`.
+
+    Namespaces are often created with copies of contexts that
+    contain slightly different data, particularly in inheritance
+    scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one
+    can traverse an entire chain of templates that inherit from
+    one-another.
+
+    """
+
+    filename = None
+    """The path of the filesystem file used for this
+    :class:`.Namespace`'s module or template.
+
+    If this is a pure module-based
+    :class:`.Namespace`, this evaluates to ``module.__file__``. If a
+    template-based namespace, it evaluates to the original
+    template file location.
+
+    """
+
+    uri = None
+    """The URI for this :class:`.Namespace`'s template.
+
+    I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
+
+    This is the equivalent of :attr:`.Template.uri`.
+
+    """
+
+    _templateuri = None
+
+    @util.memoized_property
+    def attr(self):
+        """Access module level attributes by name.
+
+        This accessor allows templates to supply "scalar"
+        attributes which are particularly handy in inheritance
+        relationships.
+
+        .. seealso::
+
+            :ref:`inheritance_attr`
+
+            :ref:`namespace_attr_for_includes`
+
+        """
+        return _NSAttr(self)
+
+    def get_namespace(self, uri):
+        """Return a :class:`.Namespace` corresponding to the given ``uri``.
+
+        If the given ``uri`` is a relative URI (i.e. it does not
+        contain a leading slash ``/``), the ``uri`` is adjusted to
+        be relative to the ``uri`` of the namespace itself. This
+        method is therefore mostly useful off of the built-in
+        ``local`` namespace, described in :ref:`namespace_local`.
+
+        In most cases, a template wouldn't need this function, and
+        should instead use the ``<%namespace>`` tag to load
+        namespaces. However, since all ``<%namespace>`` tags are
+        evaluated before the body of a template ever runs,
+        this method can be used to locate namespaces using
+        expressions that were generated within the body code of
+        the template, or to conditionally use a particular
+        namespace.
+
+        """
+        key = (self, uri)
+        if key in self.context.namespaces:
+            return self.context.namespaces[key]
+        ns = TemplateNamespace(
+            uri,
+            self.context._copy(),
+            templateuri=uri,
+            calling_uri=self._templateuri,
+        )
+        self.context.namespaces[key] = ns
+        return ns
+
+    def get_template(self, uri):
+        """Return a :class:`.Template` from the given ``uri``.
+
+        The ``uri`` resolution is relative to the ``uri`` of this
+        :class:`.Namespace` object's :class:`.Template`.
+
+        """
+        return _lookup_template(self.context, uri, self._templateuri)
+
+    def get_cached(self, key, **kwargs):
+        """Return a value from the :class:`.Cache` referenced by this
+        :class:`.Namespace` object's :class:`.Template`.
+
+        The advantage to this method versus direct access to the
+        :class:`.Cache` is that the configuration parameters
+        declared in ``<%page>`` take effect here, thereby calling
+        up the same configured backend as that configured
+        by ``<%page>``.
+
+        """
+
+        return self.cache.get(key, **kwargs)
+
+    @property
+    def cache(self):
+        """Return the :class:`.Cache` object referenced
+        by this :class:`.Namespace` object's
+        :class:`.Template`.
+
+        """
+        return self.template.cache
+
+    def include_file(self, uri, **kwargs):
+        """Include a file at the given ``uri``."""
+
+        _include_file(self.context, uri, self._templateuri, **kwargs)
+
+    def _populate(self, d, l):
+        for ident in l:
+            if ident == "*":
+                for k, v in self._get_star():
+                    d[k] = v
+            else:
+                d[ident] = getattr(self, ident)
+
+    def _get_star(self):
+        if self.callables:
+            for key in self.callables:
+                yield (key, self.callables[key])
+
+    def __getattr__(self, key):
+        if key in self.callables:
+            val = self.callables[key]
+        elif self.inherits:
+            val = getattr(self.inherits, key)
+        else:
+            raise AttributeError(
+                "Namespace '%s' has no member '%s'" % (self.name, key)
+            )
+        setattr(self, key, val)
+        return val
+
+
+class TemplateNamespace(Namespace):
+
+    """A :class:`.Namespace` specific to a :class:`.Template` instance."""
+
+    def __init__(
+        self,
+        name,
+        context,
+        template=None,
+        templateuri=None,
+        callables=None,
+        inherits=None,
+        populate_self=True,
+        calling_uri=None,
+    ):
+        self.name = name
+        self.context = context
+        self.inherits = inherits
+        if callables is not None:
+            self.callables = {c.__name__: c for c in callables}
+
+        if templateuri is not None:
+            self.template = _lookup_template(context, templateuri, calling_uri)
+            self._templateuri = self.template.module._template_uri
+        elif template is not None:
+            self.template = template
+            self._templateuri = template.module._template_uri
+        else:
+            raise TypeError("'template' argument is required.")
+
+        if populate_self:
+            lclcallable, lclcontext = _populate_self_namespace(
+                context, self.template, self_ns=self
+            )
+
+    @property
+    def module(self):
+        """The Python module referenced by this :class:`.Namespace`.
+
+        If the namespace references a :class:`.Template`, then
+        this module is the equivalent of ``template.module``,
+        i.e. the generated module for the template.
+
+        """
+        return self.template.module
+
+    @property
+    def filename(self):
+        """The path of the filesystem file used for this
+        :class:`.Namespace`'s module or template.
+        """
+        return self.template.filename
+
+    @property
+    def uri(self):
+        """The URI for this :class:`.Namespace`'s template.
+
+        I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
+
+        This is the equivalent of :attr:`.Template.uri`.
+
+        """
+        return self.template.uri
+
+    def _get_star(self):
+        if self.callables:
+            for key in self.callables:
+                yield (key, self.callables[key])
+
+        def get(key):
+            callable_ = self.template._get_def_callable(key)
+            return functools.partial(callable_, self.context)
+
+        for k in self.template.module._exports:
+            yield (k, get(k))
+
+    def __getattr__(self, key):
+        if key in self.callables:
+            val = self.callables[key]
+        elif self.template.has_def(key):
+            callable_ = self.template._get_def_callable(key)
+            val = functools.partial(callable_, self.context)
+        elif self.inherits:
+            val = getattr(self.inherits, key)
+
+        else:
+            raise AttributeError(
+                "Namespace '%s' has no member '%s'" % (self.name, key)
+            )
+        setattr(self, key, val)
+        return val
+
+
+class ModuleNamespace(Namespace):
+
+    """A :class:`.Namespace` specific to a Python module instance."""
+
+    def __init__(
+        self,
+        name,
+        context,
+        module,
+        callables=None,
+        inherits=None,
+        populate_self=True,
+        calling_uri=None,
+    ):
+        self.name = name
+        self.context = context
+        self.inherits = inherits
+        if callables is not None:
+            self.callables = {c.__name__: c for c in callables}
+
+        mod = __import__(module)
+        for token in module.split(".")[1:]:
+            mod = getattr(mod, token)
+        self.module = mod
+
+    @property
+    def filename(self):
+        """The path of the filesystem file used for this
+        :class:`.Namespace`'s module or template.
+        """
+        return self.module.__file__
+
+    def _get_star(self):
+        if self.callables:
+            for key in self.callables:
+                yield (key, self.callables[key])
+        for key in dir(self.module):
+            if key[0] != "_":
+                callable_ = getattr(self.module, key)
+                if callable(callable_):
+                    yield key, functools.partial(callable_, self.context)
+
+    def __getattr__(self, key):
+        if key in self.callables:
+            val = self.callables[key]
+        elif hasattr(self.module, key):
+            callable_ = getattr(self.module, key)
+            val = functools.partial(callable_, self.context)
+        elif self.inherits:
+            val = getattr(self.inherits, key)
+        else:
+            raise AttributeError(
+                "Namespace '%s' has no member '%s'" % (self.name, key)
+            )
+        setattr(self, key, val)
+        return val
+
+
+def supports_caller(func):
+    """Apply a caller_stack compatibility decorator to a plain
+    Python function.
+
+    See the example in :ref:`namespaces_python_modules`.
+
+    """
+
+    def wrap_stackframe(context, *args, **kwargs):
+        context.caller_stack._push_frame()
+        try:
+            return func(context, *args, **kwargs)
+        finally:
+            context.caller_stack._pop_frame()
+
+    return wrap_stackframe
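+
+
+# Illustrative sketch (not part of upstream Mako; "widget" is a made-up
+# name): a plain Python function decorated with supports_caller can be
+# invoked via <%call> and delegate back to the caller's body:
+#   @supports_caller
+#   def widget(context):
+#       context.write("<div>")
+#       context["caller"].body()
+#       context.write("</div>")
+#       return ""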
+
+
+def capture(context, callable_, *args, **kwargs):
+    """Execute the given template def, capturing the output into
+    a buffer.
+
+    See the example in :ref:`namespaces_python_modules`.
+
+    """
+
+    if not callable(callable_):
+        raise exceptions.RuntimeException(
+            "capture() function expects a callable as "
+            "its argument (i.e. capture(func, *args, **kwargs))"
+        )
+    context._push_buffer()
+    try:
+        callable_(*args, **kwargs)
+    finally:
+        buf = context._pop_buffer()
+    return buf.getvalue()
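+
+
+# Illustrative sketch (not part of upstream Mako; "some_def" is a made-up
+# name): inside a template, the bound "capture" callable renders a def
+# into a string rather than to the output stream:
+#   <% content = capture(some_def, x=5) %>
+# which calls some_def(x=5) against a temporary buffer and returns the
+# buffered text.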
+
+
+def _decorate_toplevel(fn):
+    def decorate_render(render_fn):
+        def go(context, *args, **kw):
+            def y(*args, **kw):
+                return render_fn(context, *args, **kw)
+
+            try:
+                y.__name__ = render_fn.__name__[7:]
+            except TypeError:
+                # < Python 2.4
+                pass
+            return fn(y)(context, *args, **kw)
+
+        return go
+
+    return decorate_render
+
+
+def _decorate_inline(context, fn):
+    def decorate_render(render_fn):
+        dec = fn(render_fn)
+
+        def go(*args, **kw):
+            return dec(context, *args, **kw)
+
+        return go
+
+    return decorate_render
+
+
+def _include_file(context, uri, calling_uri, **kwargs):
+    """locate the template from the given uri and include it in
+    the current output."""
+
+    template = _lookup_template(context, uri, calling_uri)
+    (callable_, ctx) = _populate_self_namespace(
+        context._clean_inheritance_tokens(), template
+    )
+    kwargs = _kwargs_for_include(callable_, context._data, **kwargs)
+    if template.include_error_handler:
+        try:
+            callable_(ctx, **kwargs)
+        except Exception:
+            result = template.include_error_handler(ctx, compat.exception_as())
+            if not result:
+                raise
+    else:
+        callable_(ctx, **kwargs)
+
+
+def _inherit_from(context, uri, calling_uri):
+    """called by the _inherit method in template modules to set
+    up the inheritance chain at the start of a template's
+    execution."""
+
+    if uri is None:
+        return None
+    template = _lookup_template(context, uri, calling_uri)
+    self_ns = context["self"]
+    ih = self_ns
+    while ih.inherits is not None:
+        ih = ih.inherits
+    lclcontext = context._locals({"next": ih})
+    ih.inherits = TemplateNamespace(
+        "self:%s" % template.uri,
+        lclcontext,
+        template=template,
+        populate_self=False,
+    )
+    context._data["parent"] = lclcontext._data["local"] = ih.inherits
+    callable_ = getattr(template.module, "_mako_inherit", None)
+    if callable_ is not None:
+        ret = callable_(template, lclcontext)
+        if ret:
+            return ret
+
+    gen_ns = getattr(template.module, "_mako_generate_namespaces", None)
+    if gen_ns is not None:
+        gen_ns(context)
+    return (template.callable_, lclcontext)
+
+
+def _lookup_template(context, uri, relativeto):
+    lookup = context._with_template.lookup
+    if lookup is None:
+        raise exceptions.TemplateLookupException(
+            "Template '%s' has no TemplateLookup associated"
+            % context._with_template.uri
+        )
+    uri = lookup.adjust_uri(uri, relativeto)
+    try:
+        return lookup.get_template(uri)
+    except exceptions.TopLevelLookupException as e:
+        raise exceptions.TemplateLookupException(
+            str(compat.exception_as())
+        ) from e
+
+
+def _populate_self_namespace(context, template, self_ns=None):
+    if self_ns is None:
+        self_ns = TemplateNamespace(
+            "self:%s" % template.uri,
+            context,
+            template=template,
+            populate_self=False,
+        )
+    context._data["self"] = context._data["local"] = self_ns
+    if hasattr(template.module, "_mako_inherit"):
+        ret = template.module._mako_inherit(template, context)
+        if ret:
+            return ret
+    return (template.callable_, context)
+
+
+def _render(template, callable_, args, data, as_unicode=False):
+    """create a Context and return the string
+    output of the given template and template callable."""
+
+    if as_unicode:
+        buf = util.FastEncodingBuffer()
+    else:
+        buf = util.FastEncodingBuffer(
+            encoding=template.output_encoding, errors=template.encoding_errors
+        )
+    context = Context(buf, **data)
+    context._outputting_as_unicode = as_unicode
+    context._set_with_template(template)
+
+    _render_context(
+        template,
+        callable_,
+        context,
+        *args,
+        **_kwargs_for_callable(callable_, data),
+    )
+    return context._pop_buffer().getvalue()
+
+
+def _kwargs_for_callable(callable_, data):
+    argspec = compat.inspect_getargspec(callable_)
+    # for normal pages, **pageargs is usually present
+    if argspec[2]:
+        return data
+
+    # for rendering defs from the top level, figure out the args
+    namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
+    kwargs = {}
+    for arg in namedargs:
+        if arg != "context" and arg in data and arg not in kwargs:
+            kwargs[arg] = data[arg]
+    return kwargs
+
+
+def _kwargs_for_include(callable_, data, **kwargs):
+    argspec = compat.inspect_getargspec(callable_)
+    namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
+    for arg in namedargs:
+        if arg != "context" and arg in data and arg not in kwargs:
+            kwargs[arg] = data[arg]
+    return kwargs
+
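+# For illustration (editorial note, not part of this module): given a def
+# whose signature is "def render_header(context, title)", _kwargs_for_include
+# picks only the named arguments it declares out of the context data:
+#
+#     _kwargs_for_include(render_header, {"title": "hi", "unused": 1})
+#     # -> {"title": "hi"}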
+
+def _render_context(tmpl, callable_, context, *args, **kwargs):
+    import mako.template as template
+
+    # create polymorphic 'self' namespace for this
+    # template with possibly updated context
+    if not isinstance(tmpl, template.DefTemplate):
+        # if main render method, call from the base of the inheritance stack
+        (inherit, lclcontext) = _populate_self_namespace(context, tmpl)
+        _exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
+    else:
+        # otherwise, call the actual rendering method specified
+        (inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
+        _exec_template(callable_, context, args=args, kwargs=kwargs)
+
+
+def _exec_template(callable_, context, args=None, kwargs=None):
+    """execute a rendering callable given the callable, a
+    Context, and optional explicit arguments
+
+    the contextual Template will be located if it exists, and
+    the error handling options specified on that Template will
+    be interpreted here.
+    """
+    template = context._with_template
+    if template is not None and (
+        template.format_exceptions or template.error_handler
+    ):
+        try:
+            callable_(context, *args, **kwargs)
+        except Exception:
+            _render_error(template, context, compat.exception_as())
+        except:
+            # also handle BaseExceptions not derived from Exception;
+            # pass the exception instance, not its class
+            e = sys.exc_info()[1]
+            _render_error(template, context, e)
+    else:
+        callable_(context, *args, **kwargs)
+
+
+def _render_error(template, context, error):
+    if template.error_handler:
+        result = template.error_handler(context, error)
+        if not result:
+            tp, value, tb = sys.exc_info()
+            if value and tb:
+                raise value.with_traceback(tb)
+            else:
+                raise error
+    else:
+        error_template = exceptions.html_error_template()
+        if context._outputting_as_unicode:
+            context._buffer_stack[:] = [util.FastEncodingBuffer()]
+        else:
+            context._buffer_stack[:] = [
+                util.FastEncodingBuffer(
+                    error_template.output_encoding,
+                    error_template.encoding_errors,
+                )
+            ]
+
+        context._set_with_template(error_template)
+        error_template.render_context(context, error=error)
diff --git a/venv/Lib/site-packages/mako/template.py b/venv/Lib/site-packages/mako/template.py
new file mode 100644
index 0000000000000000000000000000000000000000..e72915b0040c6db01059eb2957bb870bc959d397
--- /dev/null
+++ b/venv/Lib/site-packages/mako/template.py
@@ -0,0 +1,715 @@
+# mako/template.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+"""Provides the Template class, a facade for parsing, generating and executing
+template strings, as well as template runtime operations."""
+
+import json
+import os
+import re
+import shutil
+import stat
+import tempfile
+import types
+import weakref
+
+from mako import cache
+from mako import codegen
+from mako import compat
+from mako import exceptions
+from mako import runtime
+from mako import util
+from mako.lexer import Lexer
+
+
+class Template:
+    r"""Represents a compiled template.
+
+    :class:`.Template` includes a reference to the original
+    template source (via the :attr:`.source` attribute)
+    as well as the source code of the
+    generated Python module (i.e. the :attr:`.code` attribute),
+    as well as a reference to an actual Python module.
+
+    :class:`.Template` is constructed using either a literal string
+    representing the template text, or a filename representing a filesystem
+    path to a source file.
+
+    :param text: textual template source.  This argument is mutually
+     exclusive with the ``filename`` parameter.
+
+    :param filename: filename of the source template.  This argument is
+     mutually exclusive with the ``text`` parameter.
+
+    :param buffer_filters: string list of filters to be applied
+     to the output of ``%def``\ s which are buffered, cached, or otherwise
+     filtered, after all filters
+     defined with the ``%def`` itself have been applied. Allows the
+     creation of default expression filters that let the output
+     of return-valued ``%def``\ s "opt out" of that filtering via
+     passing special attributes or objects.
+
+    :param cache_args: Dictionary of cache configuration arguments that
+     will be passed to the :class:`.CacheImpl`.   See :ref:`caching_toplevel`.
+
+    :param cache_dir:
+
+     .. deprecated:: 0.6
+        Use the ``'dir'`` argument in the ``cache_args`` dictionary.
+        See :ref:`caching_toplevel`.
+
+    :param cache_enabled: Boolean flag which enables caching of this
+     template.  See :ref:`caching_toplevel`.
+
+    :param cache_impl: String name of a :class:`.CacheImpl` caching
+     implementation to use.   Defaults to ``'beaker'``.
+
+    :param cache_type:
+
+     .. deprecated:: 0.6
+        Use the ``'type'`` argument in the ``cache_args`` dictionary.
+        See :ref:`caching_toplevel`.
+
+    :param cache_url:
+
+     .. deprecated:: 0.6
+        Use the ``'url'`` argument in the ``cache_args`` dictionary.
+        See :ref:`caching_toplevel`.
+
+    :param default_filters: List of string filter names that will
+     be applied to all expressions.  See :ref:`filtering_default_filters`.
+
+    :param enable_loop: When ``True``, enable the ``loop`` context variable.
+     This can be set to ``False`` to support templates that may
+     be making use of the name "``loop``".   Individual templates can
+     re-enable the "loop" context by placing the directive
+     ``enable_loop="True"`` inside the ``<%page>`` tag -- see
+     :ref:`migrating_loop`.
+
+    :param encoding_errors: Error parameter passed to ``encode()`` when
+     string encoding is performed. See :ref:`usage_unicode`.
+
+    :param error_handler: Python callable which is called whenever
+     compile or runtime exceptions occur. The callable is passed
+     the current context as well as the exception. If the
+     callable returns ``True``, the exception is considered to
+     be handled, else it is re-raised after the function
+     completes. Is used to provide custom error-rendering
+     functions.
+
+     .. seealso::
+
+        :paramref:`.Template.include_error_handler` - include-specific
+        error handler function
+
+    :param format_exceptions: if ``True``, exceptions which occur during
+     the render phase of this template will be caught and
+     formatted into an HTML error page, which then becomes the
+     rendered result of the :meth:`.render` call. Otherwise,
+     runtime exceptions are propagated outwards.
+
+    :param imports: String list of Python statements, typically individual
+     "import" lines, which will be placed into the module level
+     preamble of all generated Python modules. See the example
+     in :ref:`filtering_default_filters`.
+
+    :param future_imports: String list of names to import from `__future__`.
+     These will be concatenated into a comma-separated string and inserted
+     into the beginning of the template, e.g. ``future_imports=['FOO',
+     'BAR']`` results in ``from __future__ import FOO, BAR``.  If you're
+     interested in using features like the new division operator, you must
+     use future_imports to convey that to the renderer, as otherwise the
+     import will not appear as the first executed statement in the generated
+     code and will therefore not have the desired effect.
+
+    :param include_error_handler: An error handler that runs when this template
+     is included within another one via the ``<%include>`` tag, and raises an
+     error.  Compare to the :paramref:`.Template.error_handler` option.
+
+     .. versionadded:: 1.0.6
+
+     .. seealso::
+
+        :paramref:`.Template.error_handler` - top-level error handler function
+
+    :param input_encoding: Encoding of the template's source code.  Can
+     be used in lieu of the coding comment. See
+     :ref:`usage_unicode` as well as :ref:`unicode_toplevel` for
+     details on source encoding.
+
+    :param lookup: a :class:`.TemplateLookup` instance that will be used
+     for all file lookups via the ``<%namespace>``,
+     ``<%include>``, and ``<%inherit>`` tags. See
+     :ref:`usage_templatelookup`.
+
+    :param module_directory: Filesystem location where generated
+     Python module files will be placed.
+
+    :param module_filename: Overrides the filename of the generated
+     Python module file. For advanced usage only.
+
+    :param module_writer: A callable which overrides how the Python
+     module is written entirely.  The callable is passed the
+     encoded source content of the module and the destination
+     path to be written to.   The default behavior of module writing
+     uses a tempfile in conjunction with a file move in order
+     to make the operation atomic.   So a user-defined module
+     writing function that mimics the default behavior would be:
+
+     .. sourcecode:: python
+
+         import tempfile
+         import os
+         import shutil
+
+         def module_writer(source, outputpath):
+             (dest, name) = tempfile.mkstemp(
+                 dir=os.path.dirname(outputpath)
+             )
+
+             os.write(dest, source)
+             os.close(dest)
+             shutil.move(name, outputpath)
+
+         from mako.template import Template
+         mytemplate = Template(
+                         filename="index.html",
+                         module_directory="/path/to/modules",
+                         module_writer=module_writer
+                     )
+
+     The function is provided for unusual configurations where
+     certain platform-specific permissions or other special
+     steps are needed.
+
+    :param output_encoding: The encoding to use when :meth:`.render`
+     is called.
+     See :ref:`usage_unicode` as well as :ref:`unicode_toplevel`.
+
+    :param preprocessor: Python callable which will be passed
+     the full template source before it is parsed. The return
+     result of the callable will be used as the template source
+     code.
+
+    :param lexer_cls: A :class:`.Lexer` class used to parse
+     the template.   The :class:`.Lexer` class is used by
+     default.
+
+     .. versionadded:: 0.7.4
+
+    :param strict_undefined: Replaces the automatic usage of
+     ``UNDEFINED`` for any undeclared variables not located in
+     the :class:`.Context` with an immediate raise of
+     ``NameError``. The advantage is immediate reporting of
+     missing variables which include the name.
+
+     .. versionadded:: 0.3.6
+
+    :param uri: string URI or other identifier for this template.
+     If not provided, the ``uri`` is generated from the filesystem
+     path, or from the in-memory identity of a non-file-based
+     template. The primary usage of the ``uri`` is to provide a key
+     within :class:`.TemplateLookup`, as well as to generate the
+     file path of the generated Python module file, if
+     ``module_directory`` is specified.
+
+    """
+
+    lexer_cls = Lexer
+
+    def __init__(
+        self,
+        text=None,
+        filename=None,
+        uri=None,
+        format_exceptions=False,
+        error_handler=None,
+        lookup=None,
+        output_encoding=None,
+        encoding_errors="strict",
+        module_directory=None,
+        cache_args=None,
+        cache_impl="beaker",
+        cache_enabled=True,
+        cache_type=None,
+        cache_dir=None,
+        cache_url=None,
+        module_filename=None,
+        input_encoding=None,
+        module_writer=None,
+        default_filters=None,
+        buffer_filters=(),
+        strict_undefined=False,
+        imports=None,
+        future_imports=None,
+        enable_loop=True,
+        preprocessor=None,
+        lexer_cls=None,
+        include_error_handler=None,
+    ):
+        if uri:
+            self.module_id = re.sub(r"\W", "_", uri)
+            self.uri = uri
+        elif filename:
+            self.module_id = re.sub(r"\W", "_", filename)
+            drive, path = os.path.splitdrive(filename)
+            path = os.path.normpath(path).replace(os.path.sep, "/")
+            self.uri = path
+        else:
+            self.module_id = "memory:" + hex(id(self))
+            self.uri = self.module_id
+
+        u_norm = self.uri
+        if u_norm.startswith("/"):
+            u_norm = u_norm[1:]
+        u_norm = os.path.normpath(u_norm)
+        if u_norm.startswith(".."):
+            raise exceptions.TemplateLookupException(
+                'Template uri "%s" is invalid - '
+                "it cannot be relative outside "
+                "of the root path." % self.uri
+            )
+
+        self.input_encoding = input_encoding
+        self.output_encoding = output_encoding
+        self.encoding_errors = encoding_errors
+        self.enable_loop = enable_loop
+        self.strict_undefined = strict_undefined
+        self.module_writer = module_writer
+
+        if default_filters is None:
+            self.default_filters = ["str"]
+        else:
+            self.default_filters = default_filters
+        self.buffer_filters = buffer_filters
+
+        self.imports = imports
+        self.future_imports = future_imports
+        self.preprocessor = preprocessor
+
+        if lexer_cls is not None:
+            self.lexer_cls = lexer_cls
+
+        # if plain text, compile code in memory only
+        if text is not None:
+            (code, module) = _compile_text(self, text, filename)
+            self._code = code
+            self._source = text
+            ModuleInfo(module, None, self, filename, code, text, uri)
+        elif filename is not None:
+            # if template filename and a module directory, load
+            # a filesystem-based module file, generating if needed
+            if module_filename is not None:
+                path = module_filename
+            elif module_directory is not None:
+                path = os.path.abspath(
+                    os.path.join(
+                        os.path.normpath(module_directory), u_norm + ".py"
+                    )
+                )
+            else:
+                path = None
+            module = self._compile_from_file(path, filename)
+        else:
+            raise exceptions.RuntimeException(
+                "Template requires text or filename"
+            )
+
+        self.module = module
+        self.filename = filename
+        self.callable_ = self.module.render_body
+        self.format_exceptions = format_exceptions
+        self.error_handler = error_handler
+        self.include_error_handler = include_error_handler
+        self.lookup = lookup
+
+        self.module_directory = module_directory
+
+        self._setup_cache_args(
+            cache_impl,
+            cache_enabled,
+            cache_args,
+            cache_type,
+            cache_dir,
+            cache_url,
+        )
+
+    @util.memoized_property
+    def reserved_names(self):
+        if self.enable_loop:
+            return codegen.RESERVED_NAMES
+        else:
+            return codegen.RESERVED_NAMES.difference(["loop"])
+
+    def _setup_cache_args(
+        self,
+        cache_impl,
+        cache_enabled,
+        cache_args,
+        cache_type,
+        cache_dir,
+        cache_url,
+    ):
+        self.cache_impl = cache_impl
+        self.cache_enabled = cache_enabled
+        self.cache_args = cache_args or {}
+        # transfer deprecated cache_* args
+        if cache_type:
+            self.cache_args["type"] = cache_type
+        if cache_dir:
+            self.cache_args["dir"] = cache_dir
+        if cache_url:
+            self.cache_args["url"] = cache_url
+
+    def _compile_from_file(self, path, filename):
+        if path is not None:
+            util.verify_directory(os.path.dirname(path))
+            filemtime = os.stat(filename)[stat.ST_MTIME]
+            if (
+                not os.path.exists(path)
+                or os.stat(path)[stat.ST_MTIME] < filemtime
+            ):
+                data = util.read_file(filename)
+                _compile_module_file(
+                    self, data, filename, path, self.module_writer
+                )
+            module = compat.load_module(self.module_id, path)
+            if module._magic_number != codegen.MAGIC_NUMBER:
+                data = util.read_file(filename)
+                _compile_module_file(
+                    self, data, filename, path, self.module_writer
+                )
+                module = compat.load_module(self.module_id, path)
+            ModuleInfo(module, path, self, filename, None, None, None)
+        else:
+            # template filename and no module directory, compile code
+            # in memory
+            data = util.read_file(filename)
+            code, module = _compile_text(self, data, filename)
+            self._source = None
+            self._code = code
+            ModuleInfo(module, None, self, filename, code, None, None)
+        return module
+
+    @property
+    def source(self):
+        """Return the template source code for this :class:`.Template`."""
+
+        return _get_module_info_from_callable(self.callable_).source
+
+    @property
+    def code(self):
+        """Return the module source code for this :class:`.Template`."""
+
+        return _get_module_info_from_callable(self.callable_).code
+
+    @util.memoized_property
+    def cache(self):
+        return cache.Cache(self)
+
+    @property
+    def cache_dir(self):
+        return self.cache_args["dir"]
+
+    @property
+    def cache_url(self):
+        return self.cache_args["url"]
+
+    @property
+    def cache_type(self):
+        return self.cache_args["type"]
+
+    def render(self, *args, **data):
+        """Render the output of this template as a string.
+
+        If the template specifies an output encoding, the string
+        will be encoded accordingly, else the output is raw (raw
+        output uses `StringIO` and can't handle multibyte
+        characters). A :class:`.Context` object is created corresponding
+        to the given data. Arguments that are explicitly declared
+        by this template's internal rendering method are also
+        pulled from the given ``*args``, ``**data`` members.
+
+        """
+        return runtime._render(self, self.callable_, args, data)
+
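+    # Illustrative usage (editorial note, not part of Mako's source):
+    #
+    #     Template("hello, ${name}!").render(name="world")
+    #     # -> 'hello, world!'
+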
+    def render_unicode(self, *args, **data):
+        """Render the output of this template as a unicode object."""
+
+        return runtime._render(
+            self, self.callable_, args, data, as_unicode=True
+        )
+
+    def render_context(self, context, *args, **kwargs):
+        """Render this :class:`.Template` with the given context.
+
+        The data is written to the context's buffer.
+
+        """
+        if getattr(context, "_with_template", None) is None:
+            context._set_with_template(self)
+        runtime._render_context(self, self.callable_, context, *args, **kwargs)
+
+    def has_def(self, name):
+        return hasattr(self.module, "render_%s" % name)
+
+    def get_def(self, name):
+        """Return a def of this template as a :class:`.DefTemplate`."""
+
+        return DefTemplate(self, getattr(self.module, "render_%s" % name))
+
+    def list_defs(self):
+        """return a list of defs in the template.
+
+        .. versionadded:: 1.0.4
+
+        """
+        return [i[7:] for i in dir(self.module) if i[:7] == "render_"]
+
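+    # Illustrative usage (editorial note, not part of Mako's source),
+    # assuming a template that contains <%def name="header()">:
+    #
+    #     t = Template('<%def name="header()">hi</%def>body')
+    #     t.get_def("header").render()   # -> 'hi'
+    #     t.list_defs()                  # -> ['body', 'header']
+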
+    def _get_def_callable(self, name):
+        return getattr(self.module, "render_%s" % name)
+
+    @property
+    def last_modified(self):
+        return self.module._modified_time
+
+
+class ModuleTemplate(Template):
+
+    """A Template which is constructed given an existing Python module.
+
+    e.g.::
+
+         t = Template("this is a template")
+         with open("mymodule.py", "w") as f:
+             f.write(t.code)
+
+         import mymodule
+
+         t = ModuleTemplate(mymodule)
+         print(t.render())
+
+    """
+
+    def __init__(
+        self,
+        module,
+        module_filename=None,
+        template=None,
+        template_filename=None,
+        module_source=None,
+        template_source=None,
+        output_encoding=None,
+        encoding_errors="strict",
+        format_exceptions=False,
+        error_handler=None,
+        lookup=None,
+        cache_args=None,
+        cache_impl="beaker",
+        cache_enabled=True,
+        cache_type=None,
+        cache_dir=None,
+        cache_url=None,
+        include_error_handler=None,
+    ):
+        self.module_id = re.sub(r"\W", "_", module._template_uri)
+        self.uri = module._template_uri
+        self.input_encoding = module._source_encoding
+        self.output_encoding = output_encoding
+        self.encoding_errors = encoding_errors
+        self.enable_loop = module._enable_loop
+
+        self.module = module
+        self.filename = template_filename
+        ModuleInfo(
+            module,
+            module_filename,
+            self,
+            template_filename,
+            module_source,
+            template_source,
+            module._template_uri,
+        )
+
+        self.callable_ = self.module.render_body
+        self.format_exceptions = format_exceptions
+        self.error_handler = error_handler
+        self.include_error_handler = include_error_handler
+        self.lookup = lookup
+        self._setup_cache_args(
+            cache_impl,
+            cache_enabled,
+            cache_args,
+            cache_type,
+            cache_dir,
+            cache_url,
+        )
+
+
+class DefTemplate(Template):
+
+    """A :class:`.Template` which represents a callable def in a parent
+    template."""
+
+    def __init__(self, parent, callable_):
+        self.parent = parent
+        self.callable_ = callable_
+        self.output_encoding = parent.output_encoding
+        self.module = parent.module
+        self.encoding_errors = parent.encoding_errors
+        self.format_exceptions = parent.format_exceptions
+        self.error_handler = parent.error_handler
+        self.include_error_handler = parent.include_error_handler
+        self.enable_loop = parent.enable_loop
+        self.lookup = parent.lookup
+
+    def get_def(self, name):
+        return self.parent.get_def(name)
+
+
+class ModuleInfo:
+
+    """Stores information about a module currently loaded into
+    memory, provides reverse lookups of template source, module
+    source code based on a module's identifier.
+
+    """
+
+    _modules = weakref.WeakValueDictionary()
+
+    def __init__(
+        self,
+        module,
+        module_filename,
+        template,
+        template_filename,
+        module_source,
+        template_source,
+        template_uri,
+    ):
+        self.module = module
+        self.module_filename = module_filename
+        self.template_filename = template_filename
+        self.module_source = module_source
+        self.template_source = template_source
+        self.template_uri = template_uri
+        self._modules[module.__name__] = template._mmarker = self
+        if module_filename:
+            self._modules[module_filename] = self
+
+    @classmethod
+    def get_module_source_metadata(cls, module_source, full_line_map=False):
+        source_map = re.search(
+            r"__M_BEGIN_METADATA(.+?)__M_END_METADATA", module_source, re.S
+        ).group(1)
+        source_map = json.loads(source_map)
+        source_map["line_map"] = {
+            int(k): int(v) for k, v in source_map["line_map"].items()
+        }
+        if full_line_map:
+            f_line_map = source_map["full_line_map"] = []
+            line_map = source_map["line_map"]
+
+            curr_templ_line = 1
+            for mod_line in range(1, max(line_map)):
+                if mod_line in line_map:
+                    curr_templ_line = line_map[mod_line]
+                f_line_map.append(curr_templ_line)
+        return source_map
+
+    @property
+    def code(self):
+        if self.module_source is not None:
+            return self.module_source
+        else:
+            return util.read_python_file(self.module_filename)
+
+    @property
+    def source(self):
+        if self.template_source is None:
+            data = util.read_file(self.template_filename)
+            if self.module._source_encoding:
+                return data.decode(self.module._source_encoding)
+            else:
+                return data
+
+        elif self.module._source_encoding and not isinstance(
+            self.template_source, str
+        ):
+            return self.template_source.decode(self.module._source_encoding)
+        else:
+            return self.template_source
+
+
+def _compile(template, text, filename, generate_magic_comment):
+    lexer = template.lexer_cls(
+        text,
+        filename,
+        input_encoding=template.input_encoding,
+        preprocessor=template.preprocessor,
+    )
+    node = lexer.parse()
+    source = codegen.compile(
+        node,
+        template.uri,
+        filename,
+        default_filters=template.default_filters,
+        buffer_filters=template.buffer_filters,
+        imports=template.imports,
+        future_imports=template.future_imports,
+        source_encoding=lexer.encoding,
+        generate_magic_comment=generate_magic_comment,
+        strict_undefined=template.strict_undefined,
+        enable_loop=template.enable_loop,
+        reserved_names=template.reserved_names,
+    )
+    return source, lexer
+
+
+def _compile_text(template, text, filename):
+    identifier = template.module_id
+    source, lexer = _compile(
+        template, text, filename, generate_magic_comment=False
+    )
+
+    cid = identifier
+    module = types.ModuleType(cid)
+    code = compile(source, cid, "exec")
+
+    # execute the generated module code in the new module's namespace
+    exec(code, module.__dict__, module.__dict__)
+    return (source, module)
+
+
+def _compile_module_file(template, text, filename, outputpath, module_writer):
+    source, lexer = _compile(
+        template, text, filename, generate_magic_comment=True
+    )
+
+    if isinstance(source, str):
+        source = source.encode(lexer.encoding or "ascii")
+
+    if module_writer:
+        module_writer(source, outputpath)
+    else:
+        # make tempfiles in the same location as the ultimate
+        # location.   this ensures they're on the same filesystem,
+        # avoiding synchronization issues.
+        (dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
+
+        os.write(dest, source)
+        os.close(dest)
+        shutil.move(name, outputpath)
+
+
+def _get_module_info_from_callable(callable_):
+    return _get_module_info(callable_.__globals__["__name__"])
+
+
+def _get_module_info(filename):
+    return ModuleInfo._modules[filename]
diff --git a/venv/Lib/site-packages/mako/testing/__init__.py b/venv/Lib/site-packages/mako/testing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/Lib/site-packages/mako/testing/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/mako/testing/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0f5b7e0c95f392fa1d16e84530d2e4039429cd8
Binary files /dev/null and b/venv/Lib/site-packages/mako/testing/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/testing/__pycache__/_config.cpython-311.pyc b/venv/Lib/site-packages/mako/testing/__pycache__/_config.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a101015a9fbe49d0a0d8579e72624296d212cafc
Binary files /dev/null and b/venv/Lib/site-packages/mako/testing/__pycache__/_config.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/testing/__pycache__/assertions.cpython-311.pyc b/venv/Lib/site-packages/mako/testing/__pycache__/assertions.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0123ea1eb76a4f80128a053ccd0c97e248f25aac
Binary files /dev/null and b/venv/Lib/site-packages/mako/testing/__pycache__/assertions.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/testing/__pycache__/config.cpython-311.pyc b/venv/Lib/site-packages/mako/testing/__pycache__/config.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e3fb0d77a85daa9c483cc86be112d489a245cdb
Binary files /dev/null and b/venv/Lib/site-packages/mako/testing/__pycache__/config.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/testing/__pycache__/exclusions.cpython-311.pyc b/venv/Lib/site-packages/mako/testing/__pycache__/exclusions.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3706ad458d0b669928d020505fc8d7d7f9d2fccd
Binary files /dev/null and b/venv/Lib/site-packages/mako/testing/__pycache__/exclusions.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/testing/__pycache__/fixtures.cpython-311.pyc b/venv/Lib/site-packages/mako/testing/__pycache__/fixtures.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb6bf3128d022f9d34d1332a4939a6092785501e
Binary files /dev/null and b/venv/Lib/site-packages/mako/testing/__pycache__/fixtures.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/testing/__pycache__/helpers.cpython-311.pyc b/venv/Lib/site-packages/mako/testing/__pycache__/helpers.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4e5862bb8ed9087aad258957f1291b546b27500
Binary files /dev/null and b/venv/Lib/site-packages/mako/testing/__pycache__/helpers.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/mako/testing/_config.py b/venv/Lib/site-packages/mako/testing/_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ee3d0a6ed770aba19adb757ffd552d504c5b9e2
--- /dev/null
+++ b/venv/Lib/site-packages/mako/testing/_config.py
@@ -0,0 +1,128 @@
+import configparser
+import dataclasses
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Callable
+from typing import ClassVar
+from typing import Optional
+from typing import Union
+
+from .helpers import make_path
+
+
+class ConfigError(BaseException):
+    pass
+
+
+class MissingConfig(ConfigError):
+    pass
+
+
+class MissingConfigSection(ConfigError):
+    pass
+
+
+class MissingConfigItem(ConfigError):
+    pass
+
+
+class ConfigValueTypeError(ConfigError):
+    pass
+
+
+class _GetterDispatch:
+    def __init__(self, initialdata, default_getter: Callable):
+        self.default_getter = default_getter
+        self.data = initialdata
+
+    def get_fn_for_type(self, type_):
+        return self.data.get(type_, self.default_getter)
+
+    def get_typed_value(self, type_, name):
+        get_fn = self.get_fn_for_type(type_)
+        return get_fn(name)
+
+
+def _parse_cfg_file(filespec: Union[Path, str]):
+    cfg = configparser.ConfigParser()
+    try:
+        filepath = make_path(filespec, check_exists=True)
+    except FileNotFoundError as e:
+        raise MissingConfig(f"No config file found at {filespec}") from e
+    else:
+        with open(filepath, encoding="utf-8") as f:
+            cfg.read_file(f)
+        return cfg
+
+
+def _build_getter(cfg_obj, cfg_section, method, converter=None):
+    def caller(option, **kwargs):
+        try:
+            rv = getattr(cfg_obj, method)(cfg_section, option, **kwargs)
+        except configparser.NoSectionError as nse:
+            raise MissingConfigSection(
+                f"No config section named {cfg_section}"
+            ) from nse
+        except configparser.NoOptionError as noe:
+            raise MissingConfigItem(f"No config item for {option}") from noe
+        except ValueError as ve:
+            # ConfigParser.getboolean, .getint, .getfloat raise ValueError
+            # on bad types
+            raise ConfigValueTypeError(
+                f"Wrong value type for {option}"
+            ) from ve
+        else:
+            if converter:
+                try:
+                    rv = converter(rv)
+                except Exception as e:
+                    raise ConfigValueTypeError(
+                        f"Wrong value type for {option}"
+                    ) from e
+            return rv
+
+    return caller
+
+
+def _build_getter_dispatch(cfg_obj, cfg_section, converters=None):
+    converters = converters or {}
+
+    default_getter = _build_getter(cfg_obj, cfg_section, "get")
+
+    # support ConfigParser builtins
+    getters = {
+        int: _build_getter(cfg_obj, cfg_section, "getint"),
+        bool: _build_getter(cfg_obj, cfg_section, "getboolean"),
+        float: _build_getter(cfg_obj, cfg_section, "getfloat"),
+        str: default_getter,
+    }
+
+    # use ConfigParser.get and convert value
+    getters.update(
+        {
+            type_: _build_getter(
+                cfg_obj, cfg_section, "get", converter=converter_fn
+            )
+            for type_, converter_fn in converters.items()
+        }
+    )
+
+    return _GetterDispatch(getters, default_getter)
+
+
+@dataclass
+class ReadsCfg:
+    section_header: ClassVar[str]
+    converters: ClassVar[Optional[dict]] = None
+
+    @classmethod
+    def from_cfg_file(cls, filespec: Union[Path, str]):
+        cfg = _parse_cfg_file(filespec)
+        dispatch = _build_getter_dispatch(
+            cfg, cls.section_header, converters=cls.converters
+        )
+        kwargs = {
+            field.name: dispatch.get_typed_value(field.type, field.name)
+            for field in dataclasses.fields(cls)
+        }
+        return cls(**kwargs)
diff --git a/venv/Lib/site-packages/mako/testing/assertions.py b/venv/Lib/site-packages/mako/testing/assertions.py
new file mode 100644
index 0000000000000000000000000000000000000000..22221cd20c4a65054ad0f6fec17640255671a81d
--- /dev/null
+++ b/venv/Lib/site-packages/mako/testing/assertions.py
@@ -0,0 +1,166 @@
+import contextlib
+import re
+import sys
+
+
+def eq_(a, b, msg=None):
+    """Assert a == b, with repr messaging on failure."""
+    assert a == b, msg or "%r != %r" % (a, b)
+
+
+def ne_(a, b, msg=None):
+    """Assert a != b, with repr messaging on failure."""
+    assert a != b, msg or "%r == %r" % (a, b)
+
+
+def in_(a, b, msg=None):
+    """Assert a in b, with repr messaging on failure."""
+    assert a in b, msg or "%r not in %r" % (a, b)
+
+
+def not_in(a, b, msg=None):
+    """Assert a in not b, with repr messaging on failure."""
+    assert a not in b, msg or "%r is in %r" % (a, b)
+
+
+def _assert_proper_exception_context(exception):
+    """assert that any exception we're catching does not have a __context__
+    without a __cause__, and that __suppress_context__ is never set.
+
+    Python 3 will report nested exceptions as "during the handling of
+    error X, error Y occurred". That's not what we want to do. We want
+    these exceptions in a cause chain.
+
+    """
+
+    if (
+        exception.__context__ is not exception.__cause__
+        and not exception.__suppress_context__
+    ):
+        assert False, (
+            "Exception %r was correctly raised but did not set a cause, "
+            "within context %r as its cause."
+            % (exception, exception.__context__)
+        )
+
+
+def _assert_proper_cause_cls(exception, cause_cls):
+    """assert that any exception we're catching does not have a __context__
+    without a __cause__, and that __suppress_context__ is never set.
+
+    Python 3 will report nested as exceptions as "during the handling of
+    error X, error Y occurred". That's not what we want to do. We want
+    these exceptions in a cause chain.
+
+    """
+    assert isinstance(exception.__cause__, cause_cls), (
+        "Exception %r was correctly raised but has cause %r, which does not "
+        "have the expected cause type %r."
+        % (exception, exception.__cause__, cause_cls)
+    )
+
+
+def assert_raises(except_cls, callable_, *args, **kw):
+    return _assert_raises(except_cls, callable_, args, kw)
+
+
+def assert_raises_with_proper_context(except_cls, callable_, *args, **kw):
+    return _assert_raises(except_cls, callable_, args, kw, check_context=True)
+
+
+def assert_raises_with_given_cause(
+    except_cls, cause_cls, callable_, *args, **kw
+):
+    return _assert_raises(except_cls, callable_, args, kw, cause_cls=cause_cls)
+
+
+def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
+    return _assert_raises(except_cls, callable_, args, kwargs, msg=msg)
+
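+# Illustrative usage (editorial note, not part of this module): asserts both
+# the exception type and a regex match against its message:
+#
+#     assert_raises_message(ValueError, "invalid literal", int, "not-a-number")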
+
+def assert_raises_message_with_proper_context(
+    except_cls, msg, callable_, *args, **kwargs
+):
+    return _assert_raises(
+        except_cls, callable_, args, kwargs, msg=msg, check_context=True
+    )
+
+
+def assert_raises_message_with_given_cause(
+    except_cls, msg, cause_cls, callable_, *args, **kwargs
+):
+    return _assert_raises(
+        except_cls, callable_, args, kwargs, msg=msg, cause_cls=cause_cls
+    )
+
+
+def _assert_raises(
+    except_cls,
+    callable_,
+    args,
+    kwargs,
+    msg=None,
+    check_context=False,
+    cause_cls=None,
+):
+    with _expect_raises(except_cls, msg, check_context, cause_cls) as ec:
+        callable_(*args, **kwargs)
+    return ec.error
+
+
+class _ErrorContainer:
+    error = None
+
+
+@contextlib.contextmanager
+def _expect_raises(except_cls, msg=None, check_context=False, cause_cls=None):
+    ec = _ErrorContainer()
+    if check_context:
+        are_we_already_in_a_traceback = sys.exc_info()[0]
+    try:
+        yield ec
+        success = False
+    except except_cls as err:
+        ec.error = err
+        success = True
+        if msg is not None:
+            # I'm often pdbing here, and "err" above isn't
+            # in scope, so assign the string explicitly
+            error_as_string = str(err)
+            assert re.search(msg, error_as_string, re.UNICODE), "%r !~ %s" % (
+                msg,
+                error_as_string,
+            )
+        if cause_cls is not None:
+            _assert_proper_cause_cls(err, cause_cls)
+        if check_context and not are_we_already_in_a_traceback:
+            _assert_proper_exception_context(err)
+        print(str(err).encode("utf-8"))
+
+    # it's generally a good idea to not carry traceback objects outside
+    # of the except: block, but in this case especially we seem to have
+    # hit some bug in either python 3.10.0b2 or greenlet or both which
+    # this seems to fix:
+    # https://github.com/python-greenlet/greenlet/issues/242
+    del ec
+
+    # assert outside the block so it works for AssertionError too!
+    assert success, "Callable did not raise an exception"
+
+
+def expect_raises(except_cls, check_context=False):
+    return _expect_raises(except_cls, check_context=check_context)
+
+
+def expect_raises_message(except_cls, msg, check_context=False):
+    return _expect_raises(except_cls, msg=msg, check_context=check_context)
+
+
+def expect_raises_with_proper_context(except_cls, check_context=True):
+    return _expect_raises(except_cls, check_context=check_context)
+
+
+def expect_raises_message_with_proper_context(
+    except_cls, msg, check_context=True
+):
+    return _expect_raises(except_cls, msg=msg, check_context=check_context)
diff --git a/venv/Lib/site-packages/mako/testing/config.py b/venv/Lib/site-packages/mako/testing/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..b77d0c08084b0a9b1b5e7395e1bd54895ba689e6
--- /dev/null
+++ b/venv/Lib/site-packages/mako/testing/config.py
@@ -0,0 +1,17 @@
+from dataclasses import dataclass
+from pathlib import Path
+
+from ._config import ReadsCfg
+from .helpers import make_path
+
+
+@dataclass
+class Config(ReadsCfg):
+    module_base: Path
+    template_base: Path
+
+    section_header = "mako_testing"
+    converters = {Path: make_path}
+
+
+config = Config.from_cfg_file("./setup.cfg")
diff --git a/venv/Lib/site-packages/mako/testing/exclusions.py b/venv/Lib/site-packages/mako/testing/exclusions.py
new file mode 100644
index 0000000000000000000000000000000000000000..37b2d14a395f5c27ed608955860471d597e2ae3d
--- /dev/null
+++ b/venv/Lib/site-packages/mako/testing/exclusions.py
@@ -0,0 +1,80 @@
+import pytest
+
+from mako.ext.beaker_cache import has_beaker
+from mako.util import update_wrapper
+
+
+try:
+    import babel.messages.extract as babel
+except ImportError:
+    babel = None
+
+
+try:
+    import lingua
+except ImportError:
+    lingua = None
+
+
+try:
+    import dogpile.cache  # noqa
+except ImportError:
+    has_dogpile_cache = False
+else:
+    has_dogpile_cache = True
+
+
+requires_beaker = pytest.mark.skipif(
+    not has_beaker, reason="Beaker is required for these tests."
+)
+
+
+requires_babel = pytest.mark.skipif(
+    babel is None, reason="babel not installed: skipping babelplugin test"
+)
+
+
+requires_lingua = pytest.mark.skipif(
+    lingua is None, reason="lingua not installed: skipping linguaplugin test"
+)
+
+
+requires_dogpile_cache = pytest.mark.skipif(
+    not has_dogpile_cache,
+    reason="dogpile.cache is required to run these tests",
+)
+
+
+def _pygments_version():
+    try:
+        import pygments
+
+        version = pygments.__version__
+    except:
+        version = "0"
+    return version
+
+
+requires_pygments_14 = pytest.mark.skipif(
+    _pygments_version() < "1.4", reason="Requires pygments 1.4 or greater"
+)
+
+
+def requires_no_pygments_exceptions(fn):
+    def go(*arg, **kw):
+        from mako import exceptions
+
+        exceptions._install_fallback()
+        try:
+            return fn(*arg, **kw)
+        finally:
+            exceptions._install_highlighting()
+
+    return update_wrapper(go, fn)
diff --git a/venv/Lib/site-packages/mako/testing/fixtures.py b/venv/Lib/site-packages/mako/testing/fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..01e996171d16be446ee849ceafafaf2cf699f256
--- /dev/null
+++ b/venv/Lib/site-packages/mako/testing/fixtures.py
@@ -0,0 +1,119 @@
+import os
+
+from mako.cache import CacheImpl
+from mako.cache import register_plugin
+from mako.template import Template
+from .assertions import eq_
+from .config import config
+
+
+class TemplateTest:
+    def _file_template(self, filename, **kw):
+        filepath = self._file_path(filename)
+        return Template(
+            uri=filename,
+            filename=filepath,
+            module_directory=config.module_base,
+            **kw,
+        )
+
+    def _file_path(self, filename):
+        name, ext = os.path.splitext(filename)
+        py3k_path = os.path.join(config.template_base, name + "_py3k" + ext)
+        if os.path.exists(py3k_path):
+            return py3k_path
+
+        return os.path.join(config.template_base, filename)
+
+    def _do_file_test(
+        self,
+        filename,
+        expected,
+        filters=None,
+        unicode_=True,
+        template_args=None,
+        **kw,
+    ):
+        t1 = self._file_template(filename, **kw)
+        self._do_test(
+            t1,
+            expected,
+            filters=filters,
+            unicode_=unicode_,
+            template_args=template_args,
+        )
+
+    def _do_memory_test(
+        self,
+        source,
+        expected,
+        filters=None,
+        unicode_=True,
+        template_args=None,
+        **kw,
+    ):
+        t1 = Template(text=source, **kw)
+        self._do_test(
+            t1,
+            expected,
+            filters=filters,
+            unicode_=unicode_,
+            template_args=template_args,
+        )
+
+    def _do_test(
+        self,
+        template,
+        expected,
+        filters=None,
+        template_args=None,
+        unicode_=True,
+    ):
+        if template_args is None:
+            template_args = {}
+        if unicode_:
+            output = template.render_unicode(**template_args)
+        else:
+            output = template.render(**template_args)
+
+        if filters:
+            output = filters(output)
+        eq_(output, expected)
+
+    def indicates_unbound_local_error(self, rendered_output, unbound_var):
+        var = f"&#39;{unbound_var}&#39;"
+        error_msgs = (
+            # < 3.11
+            f"local variable {var} referenced before assignment",
+            # >= 3.11
+            f"cannot access local variable {var} where it is not associated",
+        )
+        return any((msg in rendered_output) for msg in error_msgs)
+
+
+class PlainCacheImpl(CacheImpl):
+    """Simple memory cache impl so that tests which
+    use caching can run without beaker."""
+
+    def __init__(self, cache):
+        self.cache = cache
+        self.data = {}
+
+    def get_or_create(self, key, creation_function, **kw):
+        if key in self.data:
+            return self.data[key]
+        else:
+            self.data[key] = data = creation_function(**kw)
+            return data
+
+    def put(self, key, value, **kw):
+        self.data[key] = value
+
+    def get(self, key, **kw):
+        return self.data[key]
+
+    def invalidate(self, key, **kw):
+        del self.data[key]
+
+
+register_plugin("plain", __name__, "PlainCacheImpl")
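+
+# Editorial note (illustrative, not part of this module): with the "plain"
+# implementation registered above, a template can opt into it by name:
+#
+#     Template("body", cache_enabled=True, cache_impl="plain")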
diff --git a/venv/Lib/site-packages/mako/testing/helpers.py b/venv/Lib/site-packages/mako/testing/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..77cca367251663cf525dd75c7fe01f061eac7480
--- /dev/null
+++ b/venv/Lib/site-packages/mako/testing/helpers.py
@@ -0,0 +1,67 @@
+import contextlib
+import pathlib
+from pathlib import Path
+import re
+import time
+from typing import Union
+from unittest import mock
+
+
+def flatten_result(result):
+    return re.sub(r"[\s\r\n]+", " ", result).strip()
+
+
+def result_lines(result):
+    return [
+        x.strip()
+        for x in re.split(r"\r?\n", re.sub(r" +", " ", result))
+        if x.strip() != ""
+    ]
+
+
+def make_path(
+    filespec: Union[Path, str],
+    make_absolute: bool = True,
+    check_exists: bool = False,
+) -> Path:
+    path = Path(filespec)
+    if make_absolute:
+        path = path.resolve(strict=check_exists)
+    if check_exists and (not path.exists()):
+        raise FileNotFoundError(f"No file or directory at {filespec}")
+    return path
+
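+# Illustrative usage (editorial note, not part of this module): resolve a
+# path and require that it exists, raising FileNotFoundError otherwise:
+#
+#     cfg_path = make_path("./setup.cfg", check_exists=True)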
+
+def _unlink_path(path, missing_ok=False):
+    # Replicate 3.8+ functionality in 3.7
+    cm = contextlib.nullcontext()
+    if missing_ok:
+        cm = contextlib.suppress(FileNotFoundError)
+
+    with cm:
+        path.unlink()
+
+
+def replace_file_with_dir(pathspec):
+    path = pathlib.Path(pathspec)
+    _unlink_path(path, missing_ok=True)
+    path.mkdir(exist_ok=True)
+    return path
+
+
+def file_with_template_code(filespec):
+    with open(filespec, "w") as f:
+        f.write(
+            """
+i am an artificial template just for you
+"""
+        )
+    return filespec
+
+
+@contextlib.contextmanager
+def rewind_compile_time(hours=1):
+    rewound = time.time() - (hours * 3_600)
+    with mock.patch("mako.codegen.time") as codegen_time:
+        codegen_time.time.return_value = rewound
+        yield
diff --git a/venv/Lib/site-packages/mako/util.py b/venv/Lib/site-packages/mako/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..991235b343ce8db95408ec1aa2e13c9a94a66b2b
--- /dev/null
+++ b/venv/Lib/site-packages/mako/util.py
@@ -0,0 +1,388 @@
+# mako/util.py
+# Copyright 2006-2023 the Mako authors and contributors <see AUTHORS file>
+#
+# This module is part of Mako and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+from ast import parse
+import codecs
+import collections
+import operator
+import os
+import re
+import timeit
+
+from .compat import importlib_metadata_get
+
+
+def update_wrapper(decorated, fn):
+    decorated.__wrapped__ = fn
+    decorated.__name__ = fn.__name__
+    return decorated
+
+
+class PluginLoader:
+    def __init__(self, group):
+        self.group = group
+        self.impls = {}
+
+    def load(self, name):
+        if name in self.impls:
+            return self.impls[name]()
+
+        for impl in importlib_metadata_get(self.group):
+            if impl.name == name:
+                self.impls[name] = impl.load
+                return impl.load()
+
+        from mako import exceptions
+
+        raise exceptions.RuntimeException(
+            "Can't load plugin %s %s" % (self.group, name)
+        )
+
+    def register(self, name, modulepath, objname):
+        def load():
+            mod = __import__(modulepath)
+            for token in modulepath.split(".")[1:]:
+                mod = getattr(mod, token)
+            return getattr(mod, objname)
+
+        self.impls[name] = load
+
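+# Illustrative usage (editorial note, not part of this module): register an
+# implementation by dotted module path and attribute name, then load it
+# lazily by name:
+#
+#     loader = PluginLoader("mako.cache")
+#     loader.register("plain", "mako.testing.fixtures", "PlainCacheImpl")
+#     impl_cls = loader.load("plain")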
+
+def verify_directory(dir_):
+    """create and/or verify a filesystem directory."""
+
+    tries = 0
+
+    while not os.path.exists(dir_):
+        try:
+            tries += 1
+            os.makedirs(dir_, 0o755)
+        except:
+            if tries > 5:
+                raise
+
+
+def to_list(x, default=None):
+    if x is None:
+        return default
+    if not isinstance(x, (list, tuple)):
+        return [x]
+    else:
+        return x
+
+
+class memoized_property:
+
+    """A read-only @property that is only evaluated once."""
+
+    def __init__(self, fget, doc=None):
+        self.fget = fget
+        self.__doc__ = doc or fget.__doc__
+        self.__name__ = fget.__name__
+
+    def __get__(self, obj, cls):
+        if obj is None:
+            return self
+        obj.__dict__[self.__name__] = result = self.fget(obj)
+        return result
+
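+# Illustrative sketch (editorial note, not part of this module): the first
+# access runs fget and stores the result in the instance's __dict__, which
+# shadows the descriptor on later lookups:
+#
+#     class Thing:
+#         @memoized_property
+#         def value(self):
+#             return compute_once()   # compute_once() is a hypothetical helper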
+
+class memoized_instancemethod:
+
+    """Decorate a method memoize its return value.
+
+    Best applied to no-arg methods: memoization is not sensitive to
+    argument values, and will always return the same value even when
+    called with different arguments.
+
+    """
+
+    def __init__(self, fget, doc=None):
+        self.fget = fget
+        self.__doc__ = doc or fget.__doc__
+        self.__name__ = fget.__name__
+
+    def __get__(self, obj, cls):
+        if obj is None:
+            return self
+
+        def oneshot(*args, **kw):
+            result = self.fget(obj, *args, **kw)
+
+            def memo(*a, **kw):
+                return result
+
+            memo.__name__ = self.__name__
+            memo.__doc__ = self.__doc__
+            obj.__dict__[self.__name__] = memo
+            return result
+
+        oneshot.__name__ = self.__name__
+        oneshot.__doc__ = self.__doc__
+        return oneshot
+
+
+class SetLikeDict(dict):
+
+    """a dictionary that has some setlike methods on it"""
+
+    def union(self, other):
+        """produce a 'union' of this dict and another (at the key level).
+
+        values in the second dict take precedence over that of the first"""
+        x = SetLikeDict(**self)
+        x.update(other)
+        return x
+
+
+class FastEncodingBuffer:
+
+    """a very rudimentary buffer that is faster than StringIO,
+    and supports unicode data."""
+
+    def __init__(self, encoding=None, errors="strict"):
+        self.data = collections.deque()
+        self.encoding = encoding
+        self.delim = ""
+        self.errors = errors
+        self.write = self.data.append
+
+    def truncate(self):
+        self.data = collections.deque()
+        self.write = self.data.append
+
+    def getvalue(self):
+        if self.encoding:
+            return self.delim.join(self.data).encode(
+                self.encoding, self.errors
+            )
+        else:
+            return self.delim.join(self.data)
+
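+# Illustrative sketch (editorial note, not part of this module): with an
+# encoding set, getvalue() returns bytes; without one, it joins the buffered
+# strings:
+#
+#     buf = FastEncodingBuffer(encoding="utf-8")
+#     buf.write("héllo")
+#     buf.getvalue()   # -> b'h\xc3\xa9llo'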
+
+class LRUCache(dict):
+
+    """A dictionary-like object that stores a limited number of items,
+    discarding lesser used items periodically.
+
+    this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based
+    paradigm so that synchronization is not really needed.  the size management
+    is inexact.
+    """
+
+    class _Item:
+        def __init__(self, key, value):
+            self.key = key
+            self.value = value
+            self.timestamp = timeit.default_timer()
+
+        def __repr__(self):
+            return repr(self.value)
+
+    def __init__(self, capacity, threshold=0.5):
+        self.capacity = capacity
+        self.threshold = threshold
+
+    def __getitem__(self, key):
+        item = dict.__getitem__(self, key)
+        item.timestamp = timeit.default_timer()
+        return item.value
+
+    def values(self):
+        return [i.value for i in dict.values(self)]
+
+    def setdefault(self, key, value):
+        if key in self:
+            return self[key]
+        self[key] = value
+        return value
+
+    def __setitem__(self, key, value):
+        item = dict.get(self, key)
+        if item is None:
+            item = self._Item(key, value)
+            dict.__setitem__(self, key, item)
+        else:
+            item.value = value
+        self._manage_size()
+
+    def _manage_size(self):
+        while len(self) > self.capacity + self.capacity * self.threshold:
+            bytime = sorted(
+                dict.values(self),
+                key=operator.attrgetter("timestamp"),
+                reverse=True,
+            )
+            for item in bytime[self.capacity :]:
+                try:
+                    del self[item.key]
+                except KeyError:
+                    # if we couldn't find a key, most likely some other thread
+                    # broke in on us. loop around and try again
+                    break
+
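+# A minimal usage sketch (illustrative; not part of the library source):
+#
+#     cache = LRUCache(capacity=2, threshold=0.5)
+#     for k in "abcd":
+#         cache[k] = k
+#     # eviction only runs once len > capacity * (1 + threshold) == 3,
+#     # then trims back to the `capacity` most recently used items
+#     sorted(cache)  # ['c', 'd']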
+
+# Regexp to match python magic encoding line
+_PYTHON_MAGIC_COMMENT_re = re.compile(
+    r"[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)", re.VERBOSE
+)
+
+
+def parse_encoding(fp):
+    """Deduce the encoding of a Python source file (binary mode) from magic
+    comment.
+
+    It does this in the same way as the `Python interpreter`__
+
+    .. __: http://docs.python.org/ref/encodings.html
+
+    The ``fp`` argument should be a seekable file object in binary mode.
+    """
+    pos = fp.tell()
+    fp.seek(0)
+    try:
+        line1 = fp.readline()
+        has_bom = line1.startswith(codecs.BOM_UTF8)
+        if has_bom:
+            line1 = line1[len(codecs.BOM_UTF8) :]
+
+        m = _PYTHON_MAGIC_COMMENT_re.match(line1.decode("ascii", "ignore"))
+        if not m:
+            try:
+                parse(line1.decode("ascii", "ignore"))
+            except (ImportError, SyntaxError):
+                # Either it's a real syntax error, in which case the source
+                # is not valid python source, or line2 is a continuation of
+                # line1, in which case we don't want to scan line2 for a magic
+                # comment.
+                pass
+            else:
+                line2 = fp.readline()
+                m = _PYTHON_MAGIC_COMMENT_re.match(
+                    line2.decode("ascii", "ignore")
+                )
+
+        if has_bom:
+            if m:
+                raise SyntaxError(
+                    "python refuses to compile code with both a UTF8"
+                    " byte-order-mark and a magic encoding comment"
+                )
+            return "utf_8"
+        elif m:
+            return m.group(1)
+        else:
+            return None
+    finally:
+        fp.seek(pos)
+
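+# A minimal usage sketch (illustrative; "module.py" is a hypothetical file):
+#
+#     with open("module.py", "rb") as fp:
+#         parse_encoding(fp)  # e.g. "latin-1" from a coding comment,
+#                             # "utf_8" from a BOM, or None if neither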
+
+def sorted_dict_repr(d):
+    """repr() a dictionary with the keys in order.
+
+    Used by the lexer unit test to compare parse trees based on strings.
+
+    """
+    keys = list(d.keys())
+    keys.sort()
+    return "{" + ", ".join("%r: %r" % (k, d[k]) for k in keys) + "}"
+
+
+def restore__ast(_ast):
+    """Attempt to restore the required classes to the _ast module if it
+    appears to be missing them
+    """
+    if hasattr(_ast, "AST"):
+        return
+    _ast.PyCF_ONLY_AST = 2 << 9
+    m = compile(
+        """\
+def foo(): pass
+class Bar: pass
+if False: pass
+baz = 'mako'
+1 + 2 - 3 * 4 / 5
+6 // 7 % 8 << 9 >> 10
+11 & 12 ^ 13 | 14
+15 and 16 or 17
+-baz + (not +18) - ~17
+baz and 'foo' or 'bar'
+(mako is baz == baz) is not baz != mako
+mako > baz < mako >= baz <= mako
+mako in baz not in mako""",
+        "<unknown>",
+        "exec",
+        _ast.PyCF_ONLY_AST,
+    )
+    _ast.Module = type(m)
+
+    for cls in _ast.Module.__mro__:
+        if cls.__name__ == "mod":
+            _ast.mod = cls
+        elif cls.__name__ == "AST":
+            _ast.AST = cls
+
+    _ast.FunctionDef = type(m.body[0])
+    _ast.ClassDef = type(m.body[1])
+    _ast.If = type(m.body[2])
+
+    _ast.Name = type(m.body[3].targets[0])
+    _ast.Store = type(m.body[3].targets[0].ctx)
+    _ast.Str = type(m.body[3].value)
+
+    _ast.Sub = type(m.body[4].value.op)
+    _ast.Add = type(m.body[4].value.left.op)
+    _ast.Div = type(m.body[4].value.right.op)
+    _ast.Mult = type(m.body[4].value.right.left.op)
+
+    _ast.RShift = type(m.body[5].value.op)
+    _ast.LShift = type(m.body[5].value.left.op)
+    _ast.Mod = type(m.body[5].value.left.left.op)
+    _ast.FloorDiv = type(m.body[5].value.left.left.left.op)
+
+    _ast.BitOr = type(m.body[6].value.op)
+    _ast.BitXor = type(m.body[6].value.left.op)
+    _ast.BitAnd = type(m.body[6].value.left.left.op)
+
+    _ast.Or = type(m.body[7].value.op)
+    _ast.And = type(m.body[7].value.values[0].op)
+
+    _ast.Invert = type(m.body[8].value.right.op)
+    _ast.Not = type(m.body[8].value.left.right.op)
+    _ast.UAdd = type(m.body[8].value.left.right.operand.op)
+    _ast.USub = type(m.body[8].value.left.left.op)
+
+    _ast.Or = type(m.body[9].value.op)
+    _ast.And = type(m.body[9].value.values[0].op)
+
+    _ast.IsNot = type(m.body[10].value.ops[0])
+    _ast.NotEq = type(m.body[10].value.ops[1])
+    _ast.Is = type(m.body[10].value.left.ops[0])
+    _ast.Eq = type(m.body[10].value.left.ops[1])
+
+    _ast.Gt = type(m.body[11].value.ops[0])
+    _ast.Lt = type(m.body[11].value.ops[1])
+    _ast.GtE = type(m.body[11].value.ops[2])
+    _ast.LtE = type(m.body[11].value.ops[3])
+
+    _ast.In = type(m.body[12].value.ops[0])
+    _ast.NotIn = type(m.body[12].value.ops[1])
+
+
+def read_file(path, mode="rb"):
+    with open(path, mode) as fp:
+        return fp.read()
+
+
+def read_python_file(path):
+    fp = open(path, "rb")
+    try:
+        encoding = parse_encoding(fp)
+        data = fp.read()
+        if encoding:
+            data = data.decode(encoding)
+        return data
+    finally:
+        fp.close()
diff --git a/venv/Lib/site-packages/wtforms-3.1.2.dist-info/INSTALLER b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/wtforms-3.1.2.dist-info/METADATA b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..b2846d333c6fcca3b78d09488927d40f49b9116f
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/METADATA
@@ -0,0 +1,119 @@
+Metadata-Version: 2.1
+Name: WTForms
+Version: 3.1.2
+Summary: Form validation and rendering for Python web development.
+Project-URL: Documentation, https://wtforms.readthedocs.io
+Project-URL: Changes, https://wtforms.readthedocs.io/changes
+Project-URL: Source Code, https://github.com/wtforms/wtforms/
+Project-URL: Issue Tracker, https://github.com/wtforms/wtforms/issues
+Project-URL: Chat, https://discord.gg/pallets
+Maintainer: WTForms
+License: Copyright 2008 WTForms
+        
+        Redistribution and use in source and binary forms, with or without
+        modification, are permitted provided that the following conditions are
+        met:
+        
+        1.  Redistributions of source code must retain the above copyright
+            notice, this list of conditions and the following disclaimer.
+        
+        2.  Redistributions in binary form must reproduce the above copyright
+            notice, this list of conditions and the following disclaimer in the
+            documentation and/or other materials provided with the distribution.
+        
+        3.  Neither the name of the copyright holder nor the names of its
+            contributors may be used to endorse or promote products derived from
+            this software without specific prior written permission.
+        
+        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+        "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+        LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+        PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+        HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+        SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+        TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+        PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+        LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+        NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+        SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+License-File: LICENSE.rst
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Requires-Python: >=3.8
+Requires-Dist: markupsafe
+Provides-Extra: email
+Requires-Dist: email-validator; extra == 'email'
+Description-Content-Type: text/x-rst
+
+WTForms
+=======
+
+WTForms is a flexible forms validation and rendering library for Python
+web development. It can work with whatever web framework and template
+engine you choose. It supports data validation, CSRF protection,
+internationalization (I18N), and more. There are various community
+libraries that provide closer integration with popular frameworks.
+
+
+Installation
+------------
+
+Install and update using pip:
+
+.. code-block:: text
+
+    pip install -U WTForms
+
+
+Third-Party Library Integrations
+--------------------------------
+
+WTForms is designed to work with any web framework and template engine.
+There are a number of community-provided libraries that make integrating
+with frameworks even better.
+
+-   `Flask-WTF`_ integrates with the Flask framework. It can
+    automatically load data from the request, uses Flask-Babel to
+    translate based on user-selected locale, provides full-application
+    CSRF, and more.
+-   `WTForms-Alchemy`_ provides rich support for generating forms from
+    SQLAlchemy models, including an expanded set of fields and
+    validators.
+-   `WTForms-SQLAlchemy`_ provides ORM-backed fields and form generation
+    from SQLAlchemy models.
+-   `WTForms-AppEngine`_ provides ORM-backed fields and form generation
+    from AppEngine db/ndb schema.
+-   `WTForms-Django`_ provides ORM-backed fields and form generation
+    from Django models, as well as integration with Django's I18N
+    support.
+-   `WTForms-Bootstrap5`_ provides a Bootstrap 5-flavored renderer with
+    great customizability.
+-   `Starlette-WTF`_ integrates with Starlette and the FastAPI
+    framework, based on the features of Flask-WTF.
+-   `Bootstrap-Flask`_ is a collection of Jinja macros for Bootstrap 4 & 5
+    in Flask, built on Flask-WTF.
+
+.. _Flask-WTF: https://flask-wtf.readthedocs.io/
+.. _WTForms-Alchemy: https://wtforms-alchemy.readthedocs.io/
+.. _WTForms-SQLAlchemy: https://github.com/wtforms/wtforms-sqlalchemy
+.. _WTForms-AppEngine: https://github.com/wtforms/wtforms-appengine
+.. _WTForms-Django: https://github.com/wtforms/wtforms-django
+.. _WTForms-Bootstrap5: https://github.com/LaunchPlatform/wtforms-bootstrap5
+.. _Starlette-WTF: https://github.com/muicss/starlette-wtf
+.. _Bootstrap-Flask: https://github.com/helloflask/bootstrap-flask
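+
+As a rough illustration of the core API (a minimal sketch, assuming only
+the public ``Form``, ``StringField`` and ``DataRequired`` names), a form
+is a class with fields declared as attributes:
+
+.. code-block:: python
+
+    from wtforms import Form, StringField
+    from wtforms.validators import DataRequired
+
+    class NameForm(Form):
+        name = StringField("Name", validators=[DataRequired()])
+
+    form = NameForm(name="anonymous")
+    assert form.validate()  # True: the field has non-empty data
+    print(form.name())      # renders an <input> element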
+
+
+Links
+-----
+
+-   Documentation: https://wtforms.readthedocs.io/
+-   Releases: https://pypi.org/project/WTForms/
+-   Code: https://github.com/wtforms/wtforms
+-   Issue tracker: https://github.com/wtforms/wtforms/issues
+-   Discord Chat: https://discord.gg/F65P7Z9
+-   Translation: https://hosted.weblate.org/projects/wtforms/wtforms/
diff --git a/venv/Lib/site-packages/wtforms-3.1.2.dist-info/RECORD b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..51685d33cd288942c321a2079dbe45a699cdc4fd
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/RECORD
@@ -0,0 +1,107 @@
+wtforms-3.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+wtforms-3.1.2.dist-info/METADATA,sha256=5l2xOTJgewLwiItkRaAdSjfYTjkC8ZQOB__88QaNFro,5277
+wtforms-3.1.2.dist-info/RECORD,,
+wtforms-3.1.2.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87
+wtforms-3.1.2.dist-info/licenses/LICENSE.rst,sha256=z0DWD_NPaytopT0iD4tmVntayN0RGbN7Yv0V6VGP5Zs,1475
+wtforms/__init__.py,sha256=d-g-y7HGa3aTlDTeTwvAWIADC0ZVeKm0TaM4w2CznXU,188
+wtforms/__pycache__/__init__.cpython-311.pyc,,
+wtforms/__pycache__/form.cpython-311.pyc,,
+wtforms/__pycache__/i18n.cpython-311.pyc,,
+wtforms/__pycache__/meta.cpython-311.pyc,,
+wtforms/__pycache__/utils.cpython-311.pyc,,
+wtforms/__pycache__/validators.cpython-311.pyc,,
+wtforms/csrf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+wtforms/csrf/__pycache__/__init__.cpython-311.pyc,,
+wtforms/csrf/__pycache__/core.cpython-311.pyc,,
+wtforms/csrf/__pycache__/session.cpython-311.pyc,,
+wtforms/csrf/core.py,sha256=8Ka3ZATvo9I1WqreP1Y_i1r0t7m8Wcw5cKynVvJNXes,3097
+wtforms/csrf/session.py,sha256=d-roaNYuBxN2QACwkz1_UIuAdtYyTzJTmcJ3UPe1jks,3091
+wtforms/fields/__init__.py,sha256=Peqt-1SMp4e83FGdjd_imearbY8CYMBHquUC4AHgpkg,435
+wtforms/fields/__pycache__/__init__.cpython-311.pyc,,
+wtforms/fields/__pycache__/choices.cpython-311.pyc,,
+wtforms/fields/__pycache__/core.cpython-311.pyc,,
+wtforms/fields/__pycache__/datetime.cpython-311.pyc,,
+wtforms/fields/__pycache__/form.cpython-311.pyc,,
+wtforms/fields/__pycache__/list.cpython-311.pyc,,
+wtforms/fields/__pycache__/numeric.cpython-311.pyc,,
+wtforms/fields/__pycache__/simple.cpython-311.pyc,,
+wtforms/fields/choices.py,sha256=zAW-y1s1_sKjNT3qRwWuUKsUrojD9zIR6q5LwfGOwZc,6882
+wtforms/fields/core.py,sha256=h7rZYkMcKxwOywS0EgQiCJBeoWaATqH3xlQFYVPiABo,14872
+wtforms/fields/datetime.py,sha256=dYJjDxTAE1LXfzWTML1HPwc1yT0OCA4D42c8FyBgvxk,5010
+wtforms/fields/form.py,sha256=KsEX4NTQsr3nwULTC2gxbDvC5PQWoLdMdFwsm-l6rW4,2896
+wtforms/fields/list.py,sha256=htcVonFLsQfo3iibZyvoCBIq9mOr_GHe-85aU3TeflU,6470
+wtforms/fields/numeric.py,sha256=Y39ODDk9eh6Nctb8gOx26u0ioiChIIUczpxAlK8Vyaw,6157
+wtforms/fields/simple.py,sha256=VBp8mRT5n3hRMH5ErgIjZtbqRr3jaRv2lZj4NMgs6XY,4075
+wtforms/form.py,sha256=CsRwZKHup6H4yfBzVlJJqf7PdMy1KTQRrn9BL9kTtqs,12571
+wtforms/i18n.py,sha256=8AMTTSpQ5d-hONeVN8rt3RH0gRc3Mvl4zUum8Hyif88,1958
+wtforms/locale/README.md,sha256=Rb-tZdT5rErXDoe9lOmXl8Fj8ZCZPyE9fqpb0kQ2o8Y,1515
+wtforms/locale/ar/LC_MESSAGES/wtforms.mo,sha256=BsZRqFAlVbme5QMBQ3Q7jNH1_di1oY4GFH3MC0vTyB8,4393
+wtforms/locale/ar/LC_MESSAGES/wtforms.po,sha256=p0sJ1RMKDcLj6vuy00ZvNcKXxjusvF77Bnq2rRVZGRE,6917
+wtforms/locale/bg/LC_MESSAGES/wtforms.mo,sha256=zAT_9yG7wGgzn5rxgiDM6cvQSW9rU6V9QbFFsJ6oevg,3932
+wtforms/locale/bg/LC_MESSAGES/wtforms.po,sha256=317ErIay8SyLoqcDNnGqAVRr3wLB-4IY_TFC8tPDfY4,6532
+wtforms/locale/ca/LC_MESSAGES/wtforms.mo,sha256=ouKDqnb6fgNf7oPUI3KefGVxj7WfU8pljlpGHyV0Avg,3177
+wtforms/locale/ca/LC_MESSAGES/wtforms.po,sha256=1vetDbiNdNgbVLDcEIMuvFnCqDw85UNJEOyb9MIXhKI,5691
+wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.mo,sha256=Q7wmRAxnmfTxaoEfV3w94r-Om5SWYQuEoys0JOVe1H8,3398
+wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.po,sha256=nEK6qwex37l9vaj_vE-4RopHKfLe2pcNMJH9-foNvh0,5856
+wtforms/locale/cy/LC_MESSAGES/wtforms.mo,sha256=3TPlrl4k-AEhGtE-2LFCSOqzWOwNrAaL6TIoADGD_J0,3142
+wtforms/locale/cy/LC_MESSAGES/wtforms.po,sha256=J_y04pZ5y8uVHIj6j0w9MRYIG4S0m6EUTzrPkr6wyZ8,5645
+wtforms/locale/de/LC_MESSAGES/wtforms.mo,sha256=wR0jvcipBiJt5qVld4CV-ekhFh2fs4PsLEmq_6PaJjQ,3175
+wtforms/locale/de/LC_MESSAGES/wtforms.po,sha256=lZgIK4DdzhRhXr8VZ7rkBarRcgpwIMtKjEVEAYSvOTE,5685
+wtforms/locale/de_CH/LC_MESSAGES/wtforms.mo,sha256=zAQC7domVwHEcJlFNfeKEtrcldmnd-KMKGauSb9H_uk,3169
+wtforms/locale/de_CH/LC_MESSAGES/wtforms.po,sha256=eN9bvLRaMdUrlekpA8s-Bjs7kMZAVKJuwZtGVOoyxWY,5692
+wtforms/locale/el/LC_MESSAGES/wtforms.mo,sha256=uGu8mNQCoZ0NRB0zO-3ow2RaL5YLsHPTVxAsAatkJHM,3951
+wtforms/locale/el/LC_MESSAGES/wtforms.po,sha256=_xWxMhVtah6G0emQyLYL7B3TOiUfvikzaFr9WIhd13w,6581
+wtforms/locale/en/LC_MESSAGES/wtforms.mo,sha256=xtWJicZdwLQRg3xgXPvhLwPbmlot-WS244EnuOVinZ0,3323
+wtforms/locale/en/LC_MESSAGES/wtforms.po,sha256=Yn7Pb4rFmGxwp3emJW_aAqCBBZZw-E-JTGClfL78DY8,5046
+wtforms/locale/es/LC_MESSAGES/wtforms.mo,sha256=a6mFAeAHM13rhDMK9P3NcT9d-zNQq4lw6GcOlBUM2jw,3880
+wtforms/locale/es/LC_MESSAGES/wtforms.po,sha256=fzyvoPb6qCS1lQItflWIYSYEqEG3t4F-FnuxNElaywE,5855
+wtforms/locale/et/LC_MESSAGES/wtforms.mo,sha256=_32V-6yCSeNVhqd6kaNnJ7pXbkhF9QpNmaYEb14hWEQ,3202
+wtforms/locale/et/LC_MESSAGES/wtforms.po,sha256=YNsBzM1sBD7ocOI0r7Fvxw4NPMFW5pgURnIZINFknQ8,5704
+wtforms/locale/fa/LC_MESSAGES/wtforms.mo,sha256=ra-sUT_2WqNyAJ8Rfv990IVqROYjRfkAug7uN3PleXk,3796
+wtforms/locale/fa/LC_MESSAGES/wtforms.po,sha256=4OoR7G75NIIaLDfbfjfa3Q9kafJUVIlXxpG2QeYVLos,6375
+wtforms/locale/fi/LC_MESSAGES/wtforms.mo,sha256=OJ6_1yADVNy0S92-Gd-Bv3RmmDRPXsFxAm_EACWH-yM,3152
+wtforms/locale/fi/LC_MESSAGES/wtforms.po,sha256=tk-LbQudS_s5rMgo2kFzZTSrw--kXhCY-7R8wNREl7M,5659
+wtforms/locale/fr/LC_MESSAGES/wtforms.mo,sha256=VNGlFMe9qHpLxNBm4x5OUIf3rJ2karLUlPCjS-qGnns,3957
+wtforms/locale/fr/LC_MESSAGES/wtforms.po,sha256=axI5kDnXjFNuC3B2MHb5OkbLTuTPEJwPsahhDvA6ud0,5991
+wtforms/locale/he/LC_MESSAGES/wtforms.mo,sha256=gU3A-vpo7qS2WSRrfwb7iOkO7Q8BqujfadHVX5k6rdI,3265
+wtforms/locale/he/LC_MESSAGES/wtforms.po,sha256=IqZj20FIxkLDALeNmQbbjfBjSxhINpMyKk73iMG-iWg,5851
+wtforms/locale/hu/LC_MESSAGES/wtforms.mo,sha256=nq3_-Gk_BkSWRn8HAhUj01Ip1ML3u2W3F97YWPx1sHA,3275
+wtforms/locale/hu/LC_MESSAGES/wtforms.po,sha256=62JZfqVJpj9gNlE-nHlw8iCkwVLf0TkD8Lo5zst3ZMY,5690
+wtforms/locale/it/LC_MESSAGES/wtforms.mo,sha256=y2zd0S60qDG8kpJ8ZHkg04A1K5__gcdWqhIkxdUyjz8,3248
+wtforms/locale/it/LC_MESSAGES/wtforms.po,sha256=mqYFBkEtVn2TYlsDDSulSvrEyTcpQPwsoJbjOBBnIJ0,5760
+wtforms/locale/ja/LC_MESSAGES/wtforms.mo,sha256=QDrRyspTkItnM2lbDj5XSM8MYdk76H47g5KCNaNd77g,3481
+wtforms/locale/ja/LC_MESSAGES/wtforms.po,sha256=IHGeRAA0cgjZxaShkwE6XYb5tXtF7i3WONL4DWTzzOQ,5923
+wtforms/locale/ko/LC_MESSAGES/wtforms.mo,sha256=9jP3SPTvu3arewVhOp3DwzQ_8zuDJizduGSRQsiVSg4,3671
+wtforms/locale/ko/LC_MESSAGES/wtforms.po,sha256=jGlR-wumoksE7y_HA_3KiUQ5OYJBdAYEnJSc1Qkvhp8,6116
+wtforms/locale/nb/LC_MESSAGES/wtforms.mo,sha256=mUE0cOVLPErDPtfLLSOiByqtZlH7guxedXNJZtiw1zE,3080
+wtforms/locale/nb/LC_MESSAGES/wtforms.po,sha256=XbjjHJydJvjiaKik8tRdlQrGacS2qhPwes3iucS_MW4,5588
+wtforms/locale/nl/LC_MESSAGES/wtforms.mo,sha256=RS9g32GsjaM4mQG07o7JwkttoWhFXanZXYCHc0qAAT8,3237
+wtforms/locale/nl/LC_MESSAGES/wtforms.po,sha256=BPXPne_e8xTlcVCnb0mHo01lHQIFCbTquN9v2rTCjPY,5728
+wtforms/locale/pl/LC_MESSAGES/wtforms.mo,sha256=LSJZuIFigsQP1sBoUotU0ra7NI1vAiW5wC5uuF39RbM,3428
+wtforms/locale/pl/LC_MESSAGES/wtforms.po,sha256=7PlPxAt9SnXYj3PFSWGyqZa43UaWUXpSVok2en8PRps,5980
+wtforms/locale/pt/LC_MESSAGES/wtforms.mo,sha256=ZofJuz_e8PnR6yDuF_AIACaS3rkAb-sA8COEkwqdAVo,3187
+wtforms/locale/pt/LC_MESSAGES/wtforms.po,sha256=5Wlx8BUTV_JNXut2SxlQhPmHsLnRveotYCksAh1IYos,5699
+wtforms/locale/ro/LC_MESSAGES/wtforms.mo,sha256=V783xDKGpp0a5XKmNXLGq37ESPiAzWQ2MSVH1RoIJoQ,4263
+wtforms/locale/ro/LC_MESSAGES/wtforms.po,sha256=ELLteEeBQ8ng0eiEQhZzqmmaylx0S0xnlkG9RUj5jUY,6288
+wtforms/locale/ru/LC_MESSAGES/wtforms.mo,sha256=hjgC9OkcwwkJUdH9wXsZq3rCnHa9A28QmBDvQqYzph8,4418
+wtforms/locale/ru/LC_MESSAGES/wtforms.po,sha256=cJiMvdYFDc1VP7hazaowNQ28wZIq5h_YUr0_txW9rkY,7128
+wtforms/locale/sk/LC_MESSAGES/wtforms.mo,sha256=2rCuNxKuoo8FNtN_68TseyYgjClezp9G_pLV4otwBtU,4048
+wtforms/locale/sk/LC_MESSAGES/wtforms.po,sha256=nLRlWYADOYNZpYcEKdm2I8KcSHDa4td4QSjCMQKty2M,6064
+wtforms/locale/sv/LC_MESSAGES/wtforms.mo,sha256=Sfc-z8MtLqTM4qZxMRNQTsHdoZfXABToK3Gmce9dF60,3814
+wtforms/locale/sv/LC_MESSAGES/wtforms.po,sha256=nQ5xFymJNu16hy8H_QWPCP8Vr0UPI3fNCN04IGySb5Y,5784
+wtforms/locale/tr/LC_MESSAGES/wtforms.mo,sha256=QmIaqMeJ3Of_Uu1tzgXvxZ7POcTZS4PjbXXLQem63Ck,3881
+wtforms/locale/tr/LC_MESSAGES/wtforms.po,sha256=EllXH7691EZ19b9wH5dDVgNbMUNTBrgGO621WPxKomg,5850
+wtforms/locale/uk/LC_MESSAGES/wtforms.mo,sha256=PbK2gG3vFhNsUf04lj0xOOmbiwCl2L6m7NSO5y2dZho,4244
+wtforms/locale/uk/LC_MESSAGES/wtforms.po,sha256=Xy83Ta-3r5I_5oRPzEKH7MbKKXldcgtL1s1QD-7fV-A,6882
+wtforms/locale/wtforms.pot,sha256=NVZ5zI_at7V5-Zy2YNxD5q7RXCTpKs-E5oz7TUVXQkA,4247
+wtforms/locale/zh/LC_MESSAGES/wtforms.mo,sha256=tNpnPZjQhQu9dSiVUnMewsE4uL-M6E72ioExQff_VB8,3296
+wtforms/locale/zh/LC_MESSAGES/wtforms.po,sha256=1yuPp30aOGX3afZLifuRaW1hFzAIZxhWVIH28ltIxnU,5810
+wtforms/locale/zh_TW/LC_MESSAGES/wtforms.mo,sha256=H8TpK0yw-u2Nn60UwOS-9-u4auhnjxngGiv2wOnG0XQ,3099
+wtforms/locale/zh_TW/LC_MESSAGES/wtforms.po,sha256=yTbfEDmd6GOIAmHibNpIL6_BdyqVsDKPNYdPDESnl4Y,5518
+wtforms/meta.py,sha256=SU8A6gDcd2wDkVzNmSGMLBBly_fMBTKVB7BD6GCfxYo,4097
+wtforms/utils.py,sha256=_BJIhyT-Fy0ve88OzJvYyaRm7LyJjnLFs7tiTWveslE,2310
+wtforms/validators.py,sha256=R2Hxdx8nTDSO9xz97Xa0bujW8JjkA7CgKSRBjveMJgk,21753
+wtforms/widgets/__init__.py,sha256=4UXr1cMvil5x9zQp4dlXpo52mEQHeSKKRfmiXyS-u5Y,119
+wtforms/widgets/__pycache__/__init__.cpython-311.pyc,,
+wtforms/widgets/__pycache__/core.cpython-311.pyc,,
+wtforms/widgets/core.py,sha256=_wddZ9ZEQVFrDlLwGOErB0q_MUvGewFpOmFFuLROQnE,16118
diff --git a/venv/Lib/site-packages/wtforms-3.1.2.dist-info/WHEEL b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..ba1a8af28bcccdacebb8c22dfda1537447a1a58a
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.18.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/venv/Lib/site-packages/wtforms-3.1.2.dist-info/licenses/LICENSE.rst b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/licenses/LICENSE.rst
new file mode 100644
index 0000000000000000000000000000000000000000..30cbc2b394886e7f7d08a9cde1a30ae5ee0bd83f
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms-3.1.2.dist-info/licenses/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2008 WTForms
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1.  Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+2.  Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+3.  Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/venv/Lib/site-packages/wtforms/__init__.py b/venv/Lib/site-packages/wtforms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6041485024df9f3d5543ff5863d5ed3f9b249bb0
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/__init__.py
@@ -0,0 +1,7 @@
+from wtforms import validators
+from wtforms import widgets
+from wtforms.fields import *
+from wtforms.form import Form
+from wtforms.validators import ValidationError
+
+__version__ = "3.1.2"
diff --git a/venv/Lib/site-packages/wtforms/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/wtforms/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a48c8fef67982a24af17a9ee9454f0aa2a48245
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/__pycache__/form.cpython-311.pyc b/venv/Lib/site-packages/wtforms/__pycache__/form.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52a692b3f36a36aa1b5605cff621b50188f05298
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/__pycache__/form.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/__pycache__/i18n.cpython-311.pyc b/venv/Lib/site-packages/wtforms/__pycache__/i18n.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97bfee2b4c7e031af5cecf06fa0c2d2e9fdc481e
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/__pycache__/i18n.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/__pycache__/meta.cpython-311.pyc b/venv/Lib/site-packages/wtforms/__pycache__/meta.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7abe0809f7d9581b69f235288eb9b549d28592f4
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/__pycache__/meta.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/__pycache__/utils.cpython-311.pyc b/venv/Lib/site-packages/wtforms/__pycache__/utils.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38962b6111a06d534600de96aa2057a847d99a7b
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/__pycache__/utils.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/__pycache__/validators.cpython-311.pyc b/venv/Lib/site-packages/wtforms/__pycache__/validators.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1071165d60cd726c8a65dca80164878e19989b5c
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/__pycache__/validators.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/csrf/__init__.py b/venv/Lib/site-packages/wtforms/csrf/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/Lib/site-packages/wtforms/csrf/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/wtforms/csrf/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b334a1145c26b9d67786aa41cea226e432662ed
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/csrf/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/csrf/__pycache__/core.cpython-311.pyc b/venv/Lib/site-packages/wtforms/csrf/__pycache__/core.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..550b2cab802eb4ad7d10029d20f17ced87a566be
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/csrf/__pycache__/core.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/csrf/__pycache__/session.cpython-311.pyc b/venv/Lib/site-packages/wtforms/csrf/__pycache__/session.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cb6dcec30b952164151839b4f33d65646a2b4c9
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/csrf/__pycache__/session.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/csrf/core.py b/venv/Lib/site-packages/wtforms/csrf/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..5172c77d13ba39fc729dd95968f40ef6df07b066
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/csrf/core.py
@@ -0,0 +1,96 @@
+from wtforms.fields import HiddenField
+from wtforms.validators import ValidationError
+
+__all__ = ("CSRFTokenField", "CSRF")
+
+
+class CSRFTokenField(HiddenField):
+    """
+    A subclass of HiddenField designed for sending the CSRF token that is used
+    for most CSRF protection schemes.
+
+    Notably different from a normal field, this field always renders the
+    current token regardless of the submitted value, and will not be
+    copied over to object data via `populate_obj`.
+    """
+
+    current_token = None
+
+    def __init__(self, *args, **kw):
+        self.csrf_impl = kw.pop("csrf_impl")
+        super().__init__(*args, **kw)
+
+    def _value(self):
+        """
+        We want to always return the current token on render, regardless of
+        whether a good or bad token was passed.
+        """
+        return self.current_token
+
+    def populate_obj(self, *args):
+        """
+        Don't populate objects with the CSRF token
+        """
+        pass
+
+    def pre_validate(self, form):
+        """
+        Handle validation of this token field.
+        """
+        self.csrf_impl.validate_csrf_token(form, self)
+
+    def process(self, *args, **kwargs):
+        super().process(*args, **kwargs)
+        self.current_token = self.csrf_impl.generate_csrf_token(self)
+
+
+class CSRF:
+    field_class = CSRFTokenField
+
+    def setup_form(self, form):
+        """
+        Receive the form we're attached to and set up fields.
+
+        The default implementation creates a single field of
+        type :attr:`field_class` with name taken from the
+        ``csrf_field_name`` of the class meta.
+
+        :param form:
+            The form instance we're attaching to.
+        :return:
+            A sequence of `(field_name, unbound_field)` 2-tuples which
+            are unbound fields to be added to the form.
+        """
+        meta = form.meta
+        field_name = meta.csrf_field_name
+        unbound_field = self.field_class(label="CSRF Token", csrf_impl=self)
+        return [(field_name, unbound_field)]
+
+    def generate_csrf_token(self, csrf_token_field):
+        """
+        Implementations must override this to provide a method with which one
+        can get a CSRF token for this form.
+
+        A CSRF token is usually a string that is generated deterministically
+        based on some sort of user data, though it can be anything which you
+        can validate on a subsequent request.
+
+        :param csrf_token_field:
+            The field which is being used for CSRF.
+        :return:
+            A generated CSRF string.
+        """
+        raise NotImplementedError()
+
+    def validate_csrf_token(self, form, field):
+        """
+        Override this method to provide custom CSRF validation logic.
+
+        The default CSRF validation logic simply checks if the recently
+        generated token equals the one we received as formdata.
+
+        :param form: The form which has this CSRF token.
+        :param field: The CSRF token field.
+        """
+        if field.current_token != field.data:
+            raise ValidationError(field.gettext("Invalid CSRF Token."))
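+
+
+# A minimal sketch of a hypothetical subclass wiring these hooks together
+# (illustrative; not part of the library source):
+#
+#     class StaticCSRF(CSRF):
+#         def generate_csrf_token(self, csrf_token_field):
+#             # real implementations derive this from user/session data
+#             return "fixed-token"
+#
+#     class MyForm(Form):
+#         class Meta:
+#             csrf = True
+#             csrf_class = StaticCSRF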
diff --git a/venv/Lib/site-packages/wtforms/csrf/session.py b/venv/Lib/site-packages/wtforms/csrf/session.py
new file mode 100644
index 0000000000000000000000000000000000000000..62ba7638e0982f9c7de1dc8e7d45313a3709b847
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/csrf/session.py
@@ -0,0 +1,92 @@
+"""
+A provided CSRF implementation which puts CSRF data in a session.
+
+This can be used fairly comfortably with many `request.session` type
+objects, including the Werkzeug/Flask session store, Django sessions, and
+potentially other similar objects which use a dict-like API for storing
+session keys.
+
+The basic concept is that a randomly generated value is stored in the
+user's session, and an HMAC-SHA1 of it (along with an optional expiration
+time, for extra security) is used as the value of the csrf_token. If this
+token validates against the HMAC of the random value + expiration time,
+and the expiration time has not passed, the CSRF validation will pass.
+"""
+import hmac
+import os
+from datetime import datetime
+from datetime import timedelta
+from hashlib import sha1
+
+from ..validators import ValidationError
+from .core import CSRF
+
+__all__ = ("SessionCSRF",)
+
+
+class SessionCSRF(CSRF):
+    TIME_FORMAT = "%Y%m%d%H%M%S"
+
+    def setup_form(self, form):
+        self.form_meta = form.meta
+        return super().setup_form(form)
+
+    def generate_csrf_token(self, csrf_token_field):
+        meta = self.form_meta
+        if meta.csrf_secret is None:
+            raise Exception(
+                "must set `csrf_secret` on class Meta for SessionCSRF to work"
+            )
+        if meta.csrf_context is None:
+            raise TypeError("Must provide a session-like object as csrf context")
+
+        session = self.session
+
+        if "csrf" not in session:
+            session["csrf"] = sha1(os.urandom(64)).hexdigest()
+
+        if self.time_limit:
+            expires = (self.now() + self.time_limit).strftime(self.TIME_FORMAT)
+            csrf_build = "{}{}".format(session["csrf"], expires)
+        else:
+            expires = ""
+            csrf_build = session["csrf"]
+
+        hmac_csrf = hmac.new(
+            meta.csrf_secret, csrf_build.encode("utf8"), digestmod=sha1
+        )
+        return f"{expires}##{hmac_csrf.hexdigest()}"
+
+    def validate_csrf_token(self, form, field):
+        meta = self.form_meta
+        if not field.data or "##" not in field.data:
+            raise ValidationError(field.gettext("CSRF token missing."))
+
+        expires, hmac_csrf = field.data.split("##", 1)
+
+        check_val = (self.session["csrf"] + expires).encode("utf8")
+
+        hmac_compare = hmac.new(meta.csrf_secret, check_val, digestmod=sha1)
+        if hmac_compare.hexdigest() != hmac_csrf:
+            raise ValidationError(field.gettext("CSRF failed."))
+
+        if self.time_limit:
+            now_formatted = self.now().strftime(self.TIME_FORMAT)
+            if now_formatted > expires:
+                raise ValidationError(field.gettext("CSRF token expired."))
+
+    def now(self):
+        """
+        Get the current time. Used for test mocking/overriding mainly.
+        """
+        return datetime.now()
+
+    @property
+    def time_limit(self):
+        return getattr(self.form_meta, "csrf_time_limit", timedelta(minutes=30))
+
+    @property
+    def session(self):
+        return getattr(
+            self.form_meta.csrf_context, "session", self.form_meta.csrf_context
+        )
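+
+
+# A minimal sketch of enabling this implementation via a form's Meta
+# (illustrative; not part of the library source; `request` is hypothetical):
+#
+#     class MyForm(Form):
+#         class Meta:
+#             csrf = True
+#             csrf_class = SessionCSRF
+#             csrf_secret = b"a-long-random-secret"
+#
+#     form = MyForm(request.form, meta={"csrf_context": request.session})
+#
+# Tokens render as "<expiry>##<hex digest>", matching the
+# generate_csrf_token() format above.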
diff --git a/venv/Lib/site-packages/wtforms/fields/__init__.py b/venv/Lib/site-packages/wtforms/fields/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f72189c34849c418bee945e1e54df7340ce233c9
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/fields/__init__.py
@@ -0,0 +1,11 @@
+from wtforms.fields.choices import *
+from wtforms.fields.choices import SelectFieldBase
+from wtforms.fields.core import Field
+from wtforms.fields.core import Flags
+from wtforms.fields.core import Label
+from wtforms.fields.datetime import *
+from wtforms.fields.form import *
+from wtforms.fields.list import *
+from wtforms.fields.numeric import *
+from wtforms.fields.simple import *
+from wtforms.utils import unset_value as _unset_value
diff --git a/venv/Lib/site-packages/wtforms/fields/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/wtforms/fields/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42987c324e0c80820662dfdff76ad44c07b7b071
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/fields/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/fields/__pycache__/choices.cpython-311.pyc b/venv/Lib/site-packages/wtforms/fields/__pycache__/choices.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c447da5a78fde0f41236bfa078f32e241224c5e3
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/fields/__pycache__/choices.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/fields/__pycache__/core.cpython-311.pyc b/venv/Lib/site-packages/wtforms/fields/__pycache__/core.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b31fce121ff816983784f929ed4a85b47fadb7a
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/fields/__pycache__/core.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/fields/__pycache__/datetime.cpython-311.pyc b/venv/Lib/site-packages/wtforms/fields/__pycache__/datetime.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f8337fc7f13576135e7a44b678244f24623cb6a
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/fields/__pycache__/datetime.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/fields/__pycache__/form.cpython-311.pyc b/venv/Lib/site-packages/wtforms/fields/__pycache__/form.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a85236c6f2e1cd83d75f4d43c4f6da8f536363a7
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/fields/__pycache__/form.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/fields/__pycache__/list.cpython-311.pyc b/venv/Lib/site-packages/wtforms/fields/__pycache__/list.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b2cc6a3de61f2d13a8b113b112cd1094931121b
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/fields/__pycache__/list.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/fields/__pycache__/numeric.cpython-311.pyc b/venv/Lib/site-packages/wtforms/fields/__pycache__/numeric.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24dc70e980d238e317cab33d90a80b1e833735d1
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/fields/__pycache__/numeric.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/fields/__pycache__/simple.cpython-311.pyc b/venv/Lib/site-packages/wtforms/fields/__pycache__/simple.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd6b1caa760a267a3056d5f33f1dc3fca1a73db4
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/fields/__pycache__/simple.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/fields/choices.py b/venv/Lib/site-packages/wtforms/fields/choices.py
new file mode 100644
index 0000000000000000000000000000000000000000..62ed4976360abf1ab5fcace870fc29adfa64789b
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/fields/choices.py
@@ -0,0 +1,229 @@
+import itertools
+
+from wtforms import widgets
+from wtforms.fields.core import Field
+from wtforms.validators import ValidationError
+
+__all__ = (
+    "SelectField",
+    "SelectMultipleField",
+    "RadioField",
+)
+
+
+class SelectFieldBase(Field):
+    """
+    Base class for fields which can be iterated to produce options.
+
+    This isn't a field, but an abstract base class for fields which want to
+    provide this functionality.
+    """
+
+    option_widget = widgets.Option()
+
+    def __init__(self, label=None, validators=None, option_widget=None, **kwargs):
+        super().__init__(label, validators, **kwargs)
+
+        if option_widget is not None:
+            self.option_widget = option_widget
+
+    def iter_choices(self):
+        """
+        Provides data for choice widget rendering. Must return a sequence or
+        iterable of (value, label, selected, render_kw) tuples.
+        """
+        raise NotImplementedError()
+
+    def has_groups(self):
+        return False
+
+    def iter_groups(self):
+        raise NotImplementedError()
+
+    def __iter__(self):
+        opts = dict(
+            widget=self.option_widget,
+            validators=self.validators,
+            name=self.name,
+            render_kw=self.render_kw,
+            _form=None,
+            _meta=self.meta,
+        )
+        for i, choice in enumerate(self.iter_choices()):
+            if len(choice) == 4:
+                value, label, checked, render_kw = choice
+            else:
+                value, label, checked = choice
+                render_kw = {}
+
+            opt = self._Option(
+                label=label, id="%s-%d" % (self.id, i), **opts, **render_kw
+            )
+            opt.process(None, value)
+            opt.checked = checked
+            yield opt
+
+    class _Option(Field):
+        checked = False
+
+        def _value(self):
+            return str(self.data)
+
+
+class SelectField(SelectFieldBase):
+    widget = widgets.Select()
+
+    def __init__(
+        self,
+        label=None,
+        validators=None,
+        coerce=str,
+        choices=None,
+        validate_choice=True,
+        **kwargs,
+    ):
+        super().__init__(label, validators, **kwargs)
+        self.coerce = coerce
+        if callable(choices):
+            choices = choices()
+        if choices is not None:
+            self.choices = choices if isinstance(choices, dict) else list(choices)
+        else:
+            self.choices = None
+        self.validate_choice = validate_choice
+
+    def iter_choices(self):
+        if not self.choices:
+            choices = []
+        elif isinstance(self.choices, dict):
+            choices = list(itertools.chain.from_iterable(self.choices.values()))
+        else:
+            choices = self.choices
+
+        return self._choices_generator(choices)
+
+    def has_groups(self):
+        return isinstance(self.choices, dict)
+
+    def iter_groups(self):
+        if isinstance(self.choices, dict):
+            for label, choices in self.choices.items():
+                yield (label, self._choices_generator(choices))
+
+    def _choices_generator(self, choices):
+        if not choices:
+            _choices = []
+
+        elif isinstance(choices[0], (list, tuple)):
+            _choices = choices
+
+        else:
+            _choices = zip(choices, choices)
+
+        for value, label, *other_args in _choices:
+            selected = self.coerce(value) == self.data
+            render_kw = other_args[0] if len(other_args) else {}
+            yield (value, label, selected, render_kw)
+
+    def process_data(self, value):
+        try:
+            # If value is None, don't coerce to a value
+            self.data = self.coerce(value) if value is not None else None
+        except (ValueError, TypeError):
+            self.data = None
+
+    def process_formdata(self, valuelist):
+        if not valuelist:
+            return
+
+        try:
+            self.data = self.coerce(valuelist[0])
+        except ValueError as exc:
+            raise ValueError(self.gettext("Invalid Choice: could not coerce.")) from exc
+
+    def pre_validate(self, form):
+        if not self.validate_choice:
+            return
+
+        if self.choices is None:
+            raise TypeError(self.gettext("Choices cannot be None."))
+
+        for _, _, match, *_ in self.iter_choices():
+            if match:
+                break
+        else:
+            raise ValidationError(self.gettext("Not a valid choice."))
+
+
+class SelectMultipleField(SelectField):
+    """
+    No different from a normal select field, except this one can take (and
+    validate) multiple choices.  You'll need to specify the HTML `size`
+    attribute to the select field when rendering.
+    """
+
+    widget = widgets.Select(multiple=True)
+
+    def _choices_generator(self, choices):
+        if not choices:
+            _choices = []
+
+        elif isinstance(choices[0], (list, tuple)):
+            _choices = choices
+
+        else:
+            _choices = zip(choices, choices)
+
+        for value, label, *other_args in _choices:
+            selected = self.data is not None and self.coerce(value) in self.data
+            render_kw = other_args[0] if len(other_args) else {}
+            yield (value, label, selected, render_kw)
+
+    def process_data(self, value):
+        try:
+            self.data = list(self.coerce(v) for v in value)
+        except (ValueError, TypeError):
+            self.data = None
+
+    def process_formdata(self, valuelist):
+        try:
+            self.data = list(self.coerce(x) for x in valuelist)
+        except ValueError as exc:
+            raise ValueError(
+                self.gettext(
+                    "Invalid choice(s): one or more data inputs could not be coerced."
+                )
+            ) from exc
+
+    def pre_validate(self, form):
+        if not self.validate_choice or not self.data:
+            return
+
+        if self.choices is None:
+            raise TypeError(self.gettext("Choices cannot be None."))
+
+        acceptable = [self.coerce(choice[0]) for choice in self.iter_choices()]
+        if any(data not in acceptable for data in self.data):
+            unacceptable = [
+                str(data) for data in set(self.data) if data not in acceptable
+            ]
+            raise ValidationError(
+                self.ngettext(
+                    "'%(value)s' is not a valid choice for this field.",
+                    "'%(value)s' are not valid choices for this field.",
+                    len(unacceptable),
+                )
+                % dict(value="', '".join(unacceptable))
+            )
+
+
+class RadioField(SelectField):
+    """
+    Like a SelectField, except displays a list of radio buttons.
+
+    Iterating the field will produce subfields (each containing a label as
+    well) in order to allow custom rendering of the individual radio fields.
+    """
+
+    widget = widgets.ListWidget(prefix_label=False)
+    option_widget = widgets.RadioInput()
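+
+
+# Choice formats accepted by the fields above (illustrative; not part of
+# the library source; `fetch_choices` is a hypothetical callable):
+#
+#     SelectField(choices=["a", "b"])                    # value == label
+#     SelectField(choices=[("1", "One"), ("2", "Two")])  # (value, label)
+#     SelectField(choices={"Grp": [("1", "One")]})       # grouped options
+#     SelectField(choices=fetch_choices)                 # resolved at init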
diff --git a/venv/Lib/site-packages/wtforms/fields/core.py b/venv/Lib/site-packages/wtforms/fields/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..2aba50083f2429c13ddabb0bd65e313e4e286808
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/fields/core.py
@@ -0,0 +1,457 @@
+import inspect
+import itertools
+import warnings
+
+from markupsafe import escape
+from markupsafe import Markup
+
+from wtforms import widgets
+from wtforms.i18n import DummyTranslations
+from wtforms.utils import unset_value
+from wtforms.validators import StopValidation
+from wtforms.validators import ValidationError
+
+
+class Field:
+    """
+    Field base class
+    """
+
+    errors = tuple()
+    process_errors = tuple()
+    raw_data = None
+    validators = tuple()
+    widget = None
+    _formfield = True
+    _translations = DummyTranslations()
+    do_not_call_in_templates = True  # Allow Django 1.4 traversal
+
+    def __new__(cls, *args, **kwargs):
+        if "_form" in kwargs:
+            return super().__new__(cls)
+        else:
+            return UnboundField(cls, *args, **kwargs)
+
+    def __init__(
+        self,
+        label=None,
+        validators=None,
+        filters=(),
+        description="",
+        id=None,
+        default=None,
+        widget=None,
+        render_kw=None,
+        name=None,
+        _form=None,
+        _prefix="",
+        _translations=None,
+        _meta=None,
+    ):
+        """
+        Construct a new field.
+
+        :param label:
+            The label of the field.
+        :param validators:
+            A sequence of validators to call when `validate` is called.
+        :param filters:
+            A sequence of callables which are run by :meth:`~Field.process`
+            to filter or transform the input data. For example
+            ``StringField(filters=[str.strip, str.upper])``.
+            Note that filters are applied after processing the default and
+            incoming data, but before validation.
+        :param description:
+            A description for the field, typically used for help text.
+        :param id:
+            An id to use for the field. A reasonable default is set by the form,
+            and you shouldn't need to set this manually.
+        :param default:
+            The default value to assign to the field, if no form or object
+            input is provided. May be a callable.
+        :param widget:
+            If provided, overrides the widget used to render the field.
+        :param dict render_kw:
+            If provided, a dictionary which provides default keywords that
+            will be given to the widget at render time.
+        :param name:
+            The HTML name of this field. The default value is the Python
+            attribute name.
+        :param _form:
+            The form holding this field. It is passed by the form itself during
+            construction. You should never pass this value yourself.
+        :param _prefix:
+            The prefix to prepend to the form name of this field, passed by
+            the enclosing form during construction.
+        :param _translations:
+            A translations object providing message translations. Usually
+            passed by the enclosing form during construction. See
+            :doc:`I18n docs <i18n>` for information on message translations.
+        :param _meta:
+            If provided, this is the 'meta' instance from the form. You usually
+            don't pass this yourself.
+
+        If `_form` isn't provided, an :class:`UnboundField` will be
+        returned instead. Call its :func:`bind` method with a form instance and
+        a name to construct the field.
+        """
+        if _translations is not None:
+            self._translations = _translations
+
+        if _meta is not None:
+            self.meta = _meta
+        elif _form is not None:
+            self.meta = _form.meta
+        else:
+            raise TypeError("Must provide one of _form or _meta")
+
+        self.default = default
+        self.description = description
+        self.render_kw = render_kw
+        self.filters = filters
+        self.flags = Flags()
+        self.name = _prefix + name
+        self.short_name = name
+        self.type = type(self).__name__
+
+        self.check_validators(validators)
+        self.validators = validators or self.validators
+
+        self.id = id or self.name
+        self.label = Label(
+            self.id,
+            label
+            if label is not None
+            else self.gettext(name.replace("_", " ").title()),
+        )
+
+        if widget is not None:
+            self.widget = widget
+
+        for v in itertools.chain(self.validators, [self.widget]):
+            flags = getattr(v, "field_flags", {})
+
+            # check for legacy format, remove eventually
+            if isinstance(flags, tuple):  # pragma: no cover
+                warnings.warn(
+                    "Flags should be stored in dicts and not in tuples. "
+                    "The next version of WTForms will abandon support "
+                    "for flags in tuples.",
+                    DeprecationWarning,
+                    stacklevel=2,
+                )
+                flags = {flag_name: True for flag_name in flags}
+
+            for k, v in flags.items():
+                setattr(self.flags, k, v)
+
+    def __str__(self):
+        """
+        Returns an HTML representation of the field. For more powerful rendering,
+        see the `__call__` method.
+        """
+        return self()
+
+    def __html__(self):
+        """
+        Returns an HTML representation of the field. For more powerful rendering,
+        see the :meth:`__call__` method.
+        """
+        return self()
+
+    def __call__(self, **kwargs):
+        """
+        Render this field as HTML, using keyword args as additional attributes.
+
+        This delegates rendering to
+        :meth:`meta.render_field <wtforms.meta.DefaultMeta.render_field>`
+        whose default behavior is to call the field's widget, passing any
+        keyword arguments from this call along to the widget.
+
+        In all of the WTForms HTML widgets, keyword arguments are turned into
+        HTML attributes, though in theory a widget is free to do anything it
+        wants with the supplied keyword arguments, and widgets don't have to
+        even do anything related to HTML.
+        """
+        return self.meta.render_field(self, kwargs)
+
+    @classmethod
+    def check_validators(cls, validators):
+        if validators is not None:
+            for validator in validators:
+                if not callable(validator):
+                    raise TypeError(
+                        "{} is not a valid validator because it is not "
+                        "callable".format(validator)
+                    )
+
+                if inspect.isclass(validator):
+                    raise TypeError(
+                        "{} is not a valid validator because it is a class, "
+                        "it should be an instance".format(validator)
+                    )
+
+    def gettext(self, string):
+        """
+        Get a translation for the given message.
+
+        This proxies for the internal translations object.
+
+        :param string: A string to be translated.
+        :return: A string which is the translated output.
+        """
+        return self._translations.gettext(string)
+
+    def ngettext(self, singular, plural, n):
+        """
+        Get a translation for a message which can be pluralized.
+
+        :param str singular: The singular form of the message.
+        :param str plural: The plural form of the message.
+        :param int n: The number of elements this message is referring to
+        """
+        return self._translations.ngettext(singular, plural, n)
+
+    def validate(self, form, extra_validators=()):
+        """
+        Validates the field and returns True or False. `self.errors` will
+        contain any errors raised during validation. This is usually only
+        called by `Form.validate`.
+
+        Subclasses shouldn't override this, but rather override either
+        `pre_validate`, `post_validate` or both, depending on needs.
+
+        :param form: The form the field belongs to.
+        :param extra_validators: A sequence of extra validators to run.
+        """
+        self.errors = list(self.process_errors)
+        stop_validation = False
+
+        # Check the type of extra_validators
+        self.check_validators(extra_validators)
+
+        # Call pre_validate
+        try:
+            self.pre_validate(form)
+        except StopValidation as e:
+            if e.args and e.args[0]:
+                self.errors.append(e.args[0])
+            stop_validation = True
+        except ValidationError as e:
+            self.errors.append(e.args[0])
+
+        # Run validators
+        if not stop_validation:
+            chain = itertools.chain(self.validators, extra_validators)
+            stop_validation = self._run_validation_chain(form, chain)
+
+        # Call post_validate
+        try:
+            self.post_validate(form, stop_validation)
+        except ValidationError as e:
+            self.errors.append(e.args[0])
+
+        return len(self.errors) == 0
+
+    def _run_validation_chain(self, form, validators):
+        """
+        Run a validation chain, stopping if any validator raises StopValidation.
+
+        :param form: The Form instance this field belongs to.
+        :param validators: a sequence or iterable of validator callables.
+        :return: True if validation was stopped, False otherwise.
+        """
+        for validator in validators:
+            try:
+                validator(form, self)
+            except StopValidation as e:
+                if e.args and e.args[0]:
+                    self.errors.append(e.args[0])
+                return True
+            except ValidationError as e:
+                self.errors.append(e.args[0])
+
+        return False
+
+    def pre_validate(self, form):
+        """
+        Override if you need field-level validation. Runs before any other
+        validators.
+
+        :param form: The form the field belongs to.
+        """
+        pass
+
+    def post_validate(self, form, validation_stopped):
+        """
+        Override if you need to run any field-level validation tasks after
+        normal validation. This shouldn't be needed in most cases.
+
+        :param form: The form the field belongs to.
+        :param validation_stopped:
+            `True` if any validator raised StopValidation.
+        """
+        pass
+
+    def process(self, formdata, data=unset_value, extra_filters=None):
+        """
+        Process incoming data, calling process_data, process_formdata as needed,
+        and run filters.
+
+        If `data` is not provided, process_data will be called on the field's
+        default.
+
+        Field subclasses usually won't override this, instead overriding the
+        process_formdata and process_data methods. Only override this for
+        special advanced processing, such as when a field encapsulates many
+        inputs.
+
+        :param extra_filters: A sequence of extra filters to run.
+        """
+        self.process_errors = []
+        if data is unset_value:
+            try:
+                data = self.default()
+            except TypeError:
+                data = self.default
+
+        self.object_data = data
+
+        try:
+            self.process_data(data)
+        except ValueError as e:
+            self.process_errors.append(e.args[0])
+
+        if formdata is not None:
+            if self.name in formdata:
+                self.raw_data = formdata.getlist(self.name)
+            else:
+                self.raw_data = []
+
+            try:
+                self.process_formdata(self.raw_data)
+            except ValueError as e:
+                self.process_errors.append(e.args[0])
+
+        try:
+            for filter in itertools.chain(self.filters, extra_filters or []):
+                self.data = filter(self.data)
+        except ValueError as e:
+            self.process_errors.append(e.args[0])
+
+    def process_data(self, value):
+        """
+        Process the Python data applied to this field and store the result.
+
+        This will be called during form construction by the form's `kwargs` or
+        `obj` argument.
+
+        :param value: The Python object containing the value to process.
+        """
+        self.data = value
+
+    def process_formdata(self, valuelist):
+        """
+        Process data received over the wire from a form.
+
+        This will be called during form construction with data supplied
+        through the `formdata` argument.
+
+        :param valuelist: A list of strings to process.
+        """
+        if valuelist:
+            self.data = valuelist[0]
+
+    def populate_obj(self, obj, name):
+        """
+        Populates `obj.<name>` with the field's data.
+
+        :note: This is a destructive operation. If `obj.<name>` already exists,
+               it will be overridden. Use with caution.
+        """
+        setattr(obj, name, self.data)
+
+
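The validation lifecycle above (`pre_validate`, then the validator chain, then `post_validate`) is easiest to see with a tiny custom field. A minimal sketch; the field and form names are illustrative, not part of the library:

```python
from wtforms import Form, StringField
from wtforms.validators import StopValidation

class NoAdminField(StringField):
    # pre_validate runs before the regular validator chain; raising
    # StopValidation records the message and halts further validators.
    def pre_validate(self, form):
        if self.data == "admin":
            raise StopValidation("Reserved username.")

class SignupForm(Form):
    username = NoAdminField("Username")

form = SignupForm(data={"username": "admin"})
assert form.validate() is False
assert form.username.errors == ["Reserved username."]
```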
+class UnboundField:
+    _formfield = True
+    creation_counter = 0
+
+    def __init__(self, field_class, *args, name=None, **kwargs):
+        UnboundField.creation_counter += 1
+        self.field_class = field_class
+        self.args = args
+        self.name = name
+        self.kwargs = kwargs
+        self.creation_counter = UnboundField.creation_counter
+        validators = kwargs.get("validators")
+        if validators:
+            self.field_class.check_validators(validators)
+
+    def bind(self, form, name, prefix="", translations=None, **kwargs):
+        kw = dict(
+            self.kwargs,
+            name=name,
+            _form=form,
+            _prefix=prefix,
+            _translations=translations,
+            **kwargs,
+        )
+        return self.field_class(*self.args, **kw)
+
+    def __repr__(self):
+        return "<UnboundField({}, {!r}, {!r})>".format(
+            self.field_class.__name__, self.args, self.kwargs
+        )
+
+
+class Flags:
+    """
+    Holds a set of flags as attributes.
+
+    Accessing a non-existing attribute returns None for its value.
+    """
+
+    def __getattr__(self, name):
+        if name.startswith("_"):
+            return super().__getattr__(name)
+        return None
+
+    def __contains__(self, name):
+        return getattr(self, name)
+
+    def __repr__(self):
+        flags = (
+            f"{name}={getattr(self, name)}"
+            for name in dir(self)
+            if not name.startswith("_")
+        )
+        return "<wtforms.fields.Flags: {%s}>" % ", ".join(flags)
+
+
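`Flags` is what backs checks like `field.flags.required` in templates: unknown flag names simply read as `None`. A quick illustration, assuming the vendored module path shown in this diff:

```python
from wtforms.fields.core import Flags

flags = Flags()
flags.required = True
assert flags.required is True
assert flags.hidden is None     # absent flags read as None, not AttributeError
assert "required" in flags      # __contains__ tests the flag's truthiness
assert "hidden" not in flags
```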
+class Label:
+    """
+    An HTML form label.
+    """
+
+    def __init__(self, field_id, text):
+        self.field_id = field_id
+        self.text = text
+
+    def __str__(self):
+        return self()
+
+    def __html__(self):
+        return self()
+
+    def __call__(self, text=None, **kwargs):
+        if "for_" in kwargs:
+            kwargs["for"] = kwargs.pop("for_")
+        else:
+            kwargs.setdefault("for", self.field_id)
+
+        attributes = widgets.html_params(**kwargs)
+        text = escape(text or self.text)
+        return Markup(f"<label {attributes}>{text}</label>")
+
+    def __repr__(self):
+        return f"Label({self.field_id!r}, {self.text!r})"
diff --git a/venv/Lib/site-packages/wtforms/fields/datetime.py b/venv/Lib/site-packages/wtforms/fields/datetime.py
new file mode 100644
index 0000000000000000000000000000000000000000..63e32d77e22d41aae1ce8a494d02d9abbe47b603
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/fields/datetime.py
@@ -0,0 +1,169 @@
+import datetime
+
+from wtforms import widgets
+from wtforms.fields.core import Field
+from wtforms.utils import clean_datetime_format_for_strptime
+
+__all__ = (
+    "DateTimeField",
+    "DateField",
+    "TimeField",
+    "MonthField",
+    "DateTimeLocalField",
+    "WeekField",
+)
+
+
+class DateTimeField(Field):
+    """
+    A text field which stores a :class:`datetime.datetime` matching one or
+    several formats. If ``format`` is a list, any input value matching any
+    format will be accepted, and the first format in the list will be used
+    to produce HTML values.
+    """
+
+    widget = widgets.DateTimeInput()
+
+    def __init__(
+        self, label=None, validators=None, format="%Y-%m-%d %H:%M:%S", **kwargs
+    ):
+        super().__init__(label, validators, **kwargs)
+        self.format = format if isinstance(format, list) else [format]
+        self.strptime_format = clean_datetime_format_for_strptime(self.format)
+
+    def _value(self):
+        if self.raw_data:
+            return " ".join(self.raw_data)
+        return self.data and self.data.strftime(self.format[0]) or ""
+
+    def process_formdata(self, valuelist):
+        if not valuelist:
+            return
+
+        date_str = " ".join(valuelist)
+        for format in self.strptime_format:
+            try:
+                self.data = datetime.datetime.strptime(date_str, format)
+                return
+            except ValueError:
+                self.data = None
+
+        raise ValueError(self.gettext("Not a valid datetime value."))
+
+
+class DateField(DateTimeField):
+    """
+    Same as :class:`~wtforms.fields.DateTimeField`, except stores a
+    :class:`datetime.date`.
+    """
+
+    widget = widgets.DateInput()
+
+    def __init__(self, label=None, validators=None, format="%Y-%m-%d", **kwargs):
+        super().__init__(label, validators, format, **kwargs)
+
+    def process_formdata(self, valuelist):
+        if not valuelist:
+            return
+
+        date_str = " ".join(valuelist)
+        for format in self.strptime_format:
+            try:
+                self.data = datetime.datetime.strptime(date_str, format).date()
+                return
+            except ValueError:
+                self.data = None
+
+        raise ValueError(self.gettext("Not a valid date value."))
+
+
+class TimeField(DateTimeField):
+    """
+    Same as :class:`~wtforms.fields.DateTimeField`, except stores a
+    :class:`datetime.time`.
+    """
+
+    widget = widgets.TimeInput()
+
+    def __init__(self, label=None, validators=None, format="%H:%M", **kwargs):
+        super().__init__(label, validators, format, **kwargs)
+
+    def process_formdata(self, valuelist):
+        if not valuelist:
+            return
+
+        time_str = " ".join(valuelist)
+        for format in self.strptime_format:
+            try:
+                self.data = datetime.datetime.strptime(time_str, format).time()
+                return
+            except ValueError:
+                self.data = None
+
+        raise ValueError(self.gettext("Not a valid time value."))
+
+
+class MonthField(DateField):
+    """
+    Same as :class:`~wtforms.fields.DateField`, except represents a month,
+    stores a :class:`datetime.date` with `day = 1`.
+    """
+
+    widget = widgets.MonthInput()
+
+    def __init__(self, label=None, validators=None, format="%Y-%m", **kwargs):
+        super().__init__(label, validators, format, **kwargs)
+
+
+class WeekField(DateField):
+    """
+    Same as :class:`~wtforms.fields.DateField`, except represents a week and
+    stores a :class:`datetime.date` for the Monday of the given week.
+    """
+
+    widget = widgets.WeekInput()
+
+    def __init__(self, label=None, validators=None, format="%Y-W%W", **kwargs):
+        super().__init__(label, validators, format, **kwargs)
+
+    def process_formdata(self, valuelist):
+        if not valuelist:
+            return
+
+        time_str = " ".join(valuelist)
+        for format in self.strptime_format:
+            try:
+                if "%w" not in format:
+                    # strptime needs a '%w' weekday to resolve a week number
+                    # to a date; default to Monday, as ISO 8601 indicates.
+                    self.data = datetime.datetime.strptime(
+                        f"{time_str}-1", f"{format}-%w"
+                    ).date()
+                else:
+                    self.data = datetime.datetime.strptime(time_str, format).date()
+                return
+            except ValueError:
+                self.data = None
+
+        raise ValueError(self.gettext("Not a valid week value."))
+
+
+class DateTimeLocalField(DateTimeField):
+    """
+    Same as :class:`~wtforms.fields.DateTimeField`, but represents an
+    ``<input type="datetime-local">``.
+    """
+
+    widget = widgets.DateTimeLocalInput()
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "format",
+            [
+                "%Y-%m-%d %H:%M:%S",
+                "%Y-%m-%dT%H:%M:%S",
+                "%Y-%m-%d %H:%M",
+                "%Y-%m-%dT%H:%M",
+            ],
+        )
+        super().__init__(*args, **kwargs)
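Because ``format`` may be a list, a field can accept several input formats while rendering with the first one. A small sketch; the form and field names are illustrative:

```python
import datetime

from wtforms import Form
from wtforms.fields import DateField

class EventForm(Form):
    # Parsing tries each format in turn; rendering uses the first.
    starts = DateField(format=["%Y-%m-%d", "%d/%m/%Y"])

form = EventForm(data={"starts": datetime.date(2023, 10, 5)})
assert form.starts._value() == "2023-10-05"
```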
diff --git a/venv/Lib/site-packages/wtforms/fields/form.py b/venv/Lib/site-packages/wtforms/fields/form.py
new file mode 100644
index 0000000000000000000000000000000000000000..83f5abeaedc75f22c816fb2ac2cd2cfc55edb963
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/fields/form.py
@@ -0,0 +1,97 @@
+from .. import widgets
+from .core import Field
+from wtforms.utils import unset_value
+
+__all__ = ("FormField",)
+
+
+class FormField(Field):
+    """
+    Encapsulate a form as a field in another form.
+
+    :param form_class:
+        A subclass of Form that will be encapsulated.
+    :param separator:
+        A string which will be suffixed to this field's name to create the
+        prefix to enclosed fields. The default is fine for most uses.
+    """
+
+    widget = widgets.TableWidget()
+
+    def __init__(
+        self, form_class, label=None, validators=None, separator="-", **kwargs
+    ):
+        super().__init__(label, validators, **kwargs)
+        self.form_class = form_class
+        self.separator = separator
+        self._obj = None
+        if self.filters:
+            raise TypeError(
+                "FormField cannot take filters, as the encapsulated"
+                " data is not mutable."
+            )
+        if validators:
+            raise TypeError(
+                "FormField does not accept any validators. Instead,"
+                " define them on the enclosed form."
+            )
+
+    def process(self, formdata, data=unset_value, extra_filters=None):
+        if extra_filters:
+            raise TypeError(
+                "FormField cannot take filters, as the encapsulated"
+                "data is not mutable."
+            )
+
+        if data is unset_value:
+            try:
+                data = self.default()
+            except TypeError:
+                data = self.default
+            self._obj = data
+
+        self.object_data = data
+
+        prefix = self.name + self.separator
+        if isinstance(data, dict):
+            self.form = self.form_class(formdata=formdata, prefix=prefix, **data)
+        else:
+            self.form = self.form_class(formdata=formdata, obj=data, prefix=prefix)
+
+    def validate(self, form, extra_validators=()):
+        if extra_validators:
+            raise TypeError(
+                "FormField does not accept in-line validators, as it"
+                " gets errors from the enclosed form."
+            )
+        return self.form.validate()
+
+    def populate_obj(self, obj, name):
+        candidate = getattr(obj, name, None)
+        if candidate is None:
+            if self._obj is None:
+                raise TypeError(
+                    "populate_obj: cannot find a value to populate from"
+                    " the provided obj or input data/defaults"
+                )
+            candidate = self._obj
+
+        self.form.populate_obj(candidate)
+        setattr(obj, name, candidate)
+
+    def __iter__(self):
+        return iter(self.form)
+
+    def __getitem__(self, name):
+        return self.form[name]
+
+    def __getattr__(self, name):
+        return getattr(self.form, name)
+
+    @property
+    def data(self):
+        return self.form.data
+
+    @property
+    def errors(self):
+        return self.form.errors
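A sketch of `FormField` nesting one form inside another: enclosed field names get the `name-` prefix in HTML, and dict data is spread into the inner form. Names here are illustrative:

```python
from wtforms import Form, FormField, StringField

class AddressForm(Form):
    street = StringField("Street")
    city = StringField("City")

class UserForm(Form):
    name = StringField("Name")
    address = FormField(AddressForm)  # renders as address-street, address-city

form = UserForm(data={"address": {"street": "1 Main St", "city": "Cardiff"}})
assert form.address.data == {"street": "1 Main St", "city": "Cardiff"}
assert form.address.form.city.data == "Cardiff"
```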
diff --git a/venv/Lib/site-packages/wtforms/fields/list.py b/venv/Lib/site-packages/wtforms/fields/list.py
new file mode 100644
index 0000000000000000000000000000000000000000..db52bc046f804a899356bed228c9ffd465a25fec
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/fields/list.py
@@ -0,0 +1,201 @@
+import itertools
+
+from .. import widgets
+from .core import Field
+from .core import UnboundField
+from wtforms.utils import unset_value
+
+__all__ = ("FieldList",)
+
+
+class FieldList(Field):
+    """
+    Encapsulate an ordered list of multiple instances of the same field type,
+    keeping data as a list.
+
+    >>> authors = FieldList(StringField('Name', [validators.DataRequired()]))
+
+    :param unbound_field:
+        A partially-instantiated field definition, just as one would be
+        defined on a form directly.
+    :param min_entries:
+        If provided, always keep at least this many entries on the field,
+        creating blank ones if the provided input does not specify
+        enough.
+    :param max_entries:
+        Accept no more than this many entries as input, even if more exist
+        in formdata.
+    :param separator:
+        A string which will be suffixed to this field's name to create the
+        prefix to enclosed list entries. The default is fine for most uses.
+    """
+
+    widget = widgets.ListWidget()
+
+    def __init__(
+        self,
+        unbound_field,
+        label=None,
+        validators=None,
+        min_entries=0,
+        max_entries=None,
+        separator="-",
+        default=(),
+        **kwargs,
+    ):
+        super().__init__(label, validators, default=default, **kwargs)
+        if self.filters:
+            raise TypeError(
+                "FieldList does not accept any filters. Instead, define"
+                " them on the enclosed field."
+            )
+        assert isinstance(
+            unbound_field, UnboundField
+        ), "Field must be unbound, not a field class"
+        self.unbound_field = unbound_field
+        self.min_entries = min_entries
+        self.max_entries = max_entries
+        self.last_index = -1
+        self._prefix = kwargs.get("_prefix", "")
+        self._separator = separator
+        self._field_separator = unbound_field.kwargs.get("separator", "-")
+
+    def process(self, formdata, data=unset_value, extra_filters=None):
+        if extra_filters:
+            raise TypeError(
+                "FieldList does not accept any filters. Instead, define"
+                " them on the enclosed field."
+            )
+
+        self.entries = []
+        if data is unset_value or not data:
+            try:
+                data = self.default()
+            except TypeError:
+                data = self.default
+
+        self.object_data = data
+
+        if formdata:
+            indices = sorted(set(self._extract_indices(self.name, formdata)))
+            if self.max_entries:
+                indices = indices[: self.max_entries]
+
+            idata = iter(data)
+            for index in indices:
+                try:
+                    obj_data = next(idata)
+                except StopIteration:
+                    obj_data = unset_value
+                self._add_entry(formdata, obj_data, index=index)
+        else:
+            for obj_data in data:
+                self._add_entry(formdata, obj_data)
+
+        while len(self.entries) < self.min_entries:
+            self._add_entry(formdata)
+
+    def _extract_indices(self, prefix, formdata):
+        """
+        Yield indices of any keys with given prefix.
+
+        formdata must be an object which will produce keys when iterated.  For
+        example, if field 'foo' contains keys 'foo-0-bar', 'foo-1-baz', then
+        the numbers 0 and 1 will be yielded, but not necessarily in order.
+        """
+        offset = len(prefix) + 1
+        for k in formdata:
+            if k.startswith(prefix):
+                k = k[offset:].split(self._field_separator, 1)[0]
+                if k.isdigit():
+                    yield int(k)
+
+    def validate(self, form, extra_validators=()):
+        """
+        Validate this FieldList.
+
+        Note that FieldList validation differs from normal field validation in
+        that FieldList validates all its enclosed fields first before running any
+        of its own validators.
+        """
+        self.errors = []
+
+        # Run validators on all entries within
+        for subfield in self.entries:
+            subfield.validate(form)
+            self.errors.append(subfield.errors)
+
+        if not any(x for x in self.errors):
+            self.errors = []
+
+        chain = itertools.chain(self.validators, extra_validators)
+        self._run_validation_chain(form, chain)
+
+        return len(self.errors) == 0
+
+    def populate_obj(self, obj, name):
+        values = getattr(obj, name, None)
+        try:
+            ivalues = iter(values)
+        except TypeError:
+            ivalues = iter([])
+
+        candidates = itertools.chain(ivalues, itertools.repeat(None))
+        _fake = type("_fake", (object,), {})
+        output = []
+        for field, data in zip(self.entries, candidates):
+            fake_obj = _fake()
+            fake_obj.data = data
+            field.populate_obj(fake_obj, "data")
+            output.append(fake_obj.data)
+
+        setattr(obj, name, output)
+
+    def _add_entry(self, formdata=None, data=unset_value, index=None):
+        assert (
+            not self.max_entries or len(self.entries) < self.max_entries
+        ), "You cannot have more than max_entries entries in this FieldList"
+        if index is None:
+            index = self.last_index + 1
+        self.last_index = index
+        name = f"{self.short_name}{self._separator}{index}"
+        id = f"{self.id}{self._separator}{index}"
+        field = self.unbound_field.bind(
+            form=None,
+            name=name,
+            prefix=self._prefix,
+            id=id,
+            _meta=self.meta,
+            translations=self._translations,
+        )
+        field.process(formdata, data)
+        self.entries.append(field)
+        return field
+
+    def append_entry(self, data=unset_value):
+        """
+        Create a new entry with optional default data.
+
+        Entries added in this way will *not* receive formdata however, and can
+        only receive object data.
+        """
+        return self._add_entry(data=data)
+
+    def pop_entry(self):
+        """Removes the last entry from the list and returns it."""
+        entry = self.entries.pop()
+        self.last_index -= 1
+        return entry
+
+    def __iter__(self):
+        return iter(self.entries)
+
+    def __len__(self):
+        return len(self.entries)
+
+    def __getitem__(self, index):
+        return self.entries[index]
+
+    @property
+    def data(self):
+        return [f.data for f in self.entries]
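`min_entries` and `append_entry` in action, as a minimal sketch; per the docstring above, entries appended this way receive object data only, never formdata:

```python
from wtforms import FieldList, Form, StringField
from wtforms.validators import DataRequired

class AuthorsForm(Form):
    # Always render at least two blank entries, even with no input.
    authors = FieldList(StringField("Name", [DataRequired()]), min_entries=2)

form = AuthorsForm()
assert len(form.authors) == 2
form.authors.append_entry("Grace Hopper")
assert form.authors.data == [None, None, "Grace Hopper"]
```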
diff --git a/venv/Lib/site-packages/wtforms/fields/numeric.py b/venv/Lib/site-packages/wtforms/fields/numeric.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9e5b26bbd08468dc2c61d51f24e537d47b7d15a
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/fields/numeric.py
@@ -0,0 +1,213 @@
+import decimal
+
+from wtforms import widgets
+from wtforms.fields.core import Field
+from wtforms.utils import unset_value
+
+__all__ = (
+    "IntegerField",
+    "DecimalField",
+    "FloatField",
+    "IntegerRangeField",
+    "DecimalRangeField",
+)
+
+
+class LocaleAwareNumberField(Field):
+    """
+    Base class for implementing locale-aware number parsing.
+
+    Locale-aware numbers require the 'babel' package to be present.
+    """
+
+    def __init__(
+        self,
+        label=None,
+        validators=None,
+        use_locale=False,
+        number_format=None,
+        **kwargs,
+    ):
+        super().__init__(label, validators, **kwargs)
+        self.use_locale = use_locale
+        if use_locale:
+            self.number_format = number_format
+            self.locale = kwargs["_form"].meta.locales[0]
+            self._init_babel()
+
+    def _init_babel(self):
+        try:
+            from babel import numbers
+
+            self.babel_numbers = numbers
+        except ImportError as exc:
+            raise ImportError(
+                "Using locale-aware decimals requires the babel library."
+            ) from exc
+
+    def _parse_decimal(self, value):
+        return self.babel_numbers.parse_decimal(value, self.locale)
+
+    def _format_decimal(self, value):
+        return self.babel_numbers.format_decimal(value, self.number_format, self.locale)
+
+
+class IntegerField(Field):
+    """
+    A text field, except all input is coerced to an integer.  Erroneous input
+    is ignored and will not be accepted as a value.
+    """
+
+    widget = widgets.NumberInput()
+
+    def __init__(self, label=None, validators=None, **kwargs):
+        super().__init__(label, validators, **kwargs)
+
+    def _value(self):
+        if self.raw_data:
+            return self.raw_data[0]
+        if self.data is not None:
+            return str(self.data)
+        return ""
+
+    def process_data(self, value):
+        if value is None or value is unset_value:
+            self.data = None
+            return
+
+        try:
+            self.data = int(value)
+        except (ValueError, TypeError) as exc:
+            self.data = None
+            raise ValueError(self.gettext("Not a valid integer value.")) from exc
+
+    def process_formdata(self, valuelist):
+        if not valuelist:
+            return
+
+        try:
+            self.data = int(valuelist[0])
+        except ValueError as exc:
+            self.data = None
+            raise ValueError(self.gettext("Not a valid integer value.")) from exc
+
+
+class DecimalField(LocaleAwareNumberField):
+    """
+    A text field which displays and coerces data of the `decimal.Decimal` type.
+
+    :param places:
+        How many decimal places to quantize the value to, for display on the
+        form. If unset, 2 decimal places are used.
+        If explicitly set to `None`, the value is not quantized.
+    :param rounding:
+        How to round the value during quantize, for example
+        `decimal.ROUND_UP`. If unset, uses the rounding value from the
+        current thread's context.
+    :param use_locale:
+        If True, use locale-based number formatting. Locale-based number
+        formatting requires the 'babel' package.
+    :param number_format:
+        Optional number format for locale. If omitted, use the default decimal
+        format for the locale.
+    """
+
+    widget = widgets.NumberInput(step="any")
+
+    def __init__(
+        self, label=None, validators=None, places=unset_value, rounding=None, **kwargs
+    ):
+        super().__init__(label, validators, **kwargs)
+        if self.use_locale and (places is not unset_value or rounding is not None):
+            raise TypeError(
+                "When using locale-aware numbers, 'places' and 'rounding' are ignored."
+            )
+
+        if places is unset_value:
+            places = 2
+        self.places = places
+        self.rounding = rounding
+
+    def _value(self):
+        if self.raw_data:
+            return self.raw_data[0]
+
+        if self.data is None:
+            return ""
+
+        if self.use_locale:
+            return str(self._format_decimal(self.data))
+
+        if self.places is None:
+            return str(self.data)
+
+        if not hasattr(self.data, "quantize"):
+            # If for some reason, data is a float or int, then format
+            # as we would for floats using string formatting.
+            format = "%%0.%df" % self.places
+            return format % self.data
+
+        exp = decimal.Decimal(".1") ** self.places
+        if self.rounding is None:
+            quantized = self.data.quantize(exp)
+        else:
+            quantized = self.data.quantize(exp, rounding=self.rounding)
+        return str(quantized)
+
+    def process_formdata(self, valuelist):
+        if not valuelist:
+            return
+
+        try:
+            if self.use_locale:
+                self.data = self._parse_decimal(valuelist[0])
+            else:
+                self.data = decimal.Decimal(valuelist[0])
+        except (decimal.InvalidOperation, ValueError) as exc:
+            self.data = None
+            raise ValueError(self.gettext("Not a valid decimal value.")) from exc
+
+
+class FloatField(Field):
+    """
+    A text field, except all input is coerced to a float.  Erroneous input
+    is ignored and will not be accepted as a value.
+    """
+
+    widget = widgets.TextInput()
+
+    def __init__(self, label=None, validators=None, **kwargs):
+        super().__init__(label, validators, **kwargs)
+
+    def _value(self):
+        if self.raw_data:
+            return self.raw_data[0]
+        if self.data is not None:
+            return str(self.data)
+        return ""
+
+    def process_formdata(self, valuelist):
+        if not valuelist:
+            return
+
+        try:
+            self.data = float(valuelist[0])
+        except ValueError as exc:
+            self.data = None
+            raise ValueError(self.gettext("Not a valid float value.")) from exc
+
+
+class IntegerRangeField(IntegerField):
+    """
+    Represents an ``<input type="range">``.
+    """
+
+    widget = widgets.RangeInput()
+
+
+class DecimalRangeField(DecimalField):
+    """
+    Represents an ``<input type="range">``.
+    """
+
+    widget = widgets.RangeInput(step="any")
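A sketch of how ``places`` and ``rounding`` shape display while leaving the stored data untouched; the form name is illustrative:

```python
import decimal

from wtforms import Form
from wtforms.fields import DecimalField

class PriceForm(Form):
    # places/rounding only affect rendering; data stays a full-precision Decimal.
    price = DecimalField(places=2, rounding=decimal.ROUND_UP)

form = PriceForm(data={"price": decimal.Decimal("19.991")})
assert form.price.data == decimal.Decimal("19.991")
assert form.price._value() == "20.00"
```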
diff --git a/venv/Lib/site-packages/wtforms/fields/simple.py b/venv/Lib/site-packages/wtforms/fields/simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..910ff96eb3f68c473f040f3987a0d3ca7625abce
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/fields/simple.py
@@ -0,0 +1,173 @@
+from .. import widgets
+from .core import Field
+
+__all__ = (
+    "BooleanField",
+    "TextAreaField",
+    "PasswordField",
+    "FileField",
+    "MultipleFileField",
+    "HiddenField",
+    "SearchField",
+    "SubmitField",
+    "StringField",
+    "TelField",
+    "URLField",
+    "EmailField",
+    "ColorField",
+)
+
+
+class BooleanField(Field):
+    """
+    Represents an ``<input type="checkbox">``. Set the ``checked``-status by using the
+    ``default``-option. Any value for ``default``, e.g. ``default="checked"`` puts
+    ``checked`` into the html-element and sets the ``data`` to ``True``
+
+    :param false_values:
+        If provided, a sequence of strings each of which is an exact match
+        string of what is considered a "false" value. Defaults to the tuple
+        ``(False, "false", "")``
+    """
+
+    widget = widgets.CheckboxInput()
+    false_values = (False, "false", "")
+
+    def __init__(self, label=None, validators=None, false_values=None, **kwargs):
+        super().__init__(label, validators, **kwargs)
+        if false_values is not None:
+            self.false_values = false_values
+
+    def process_data(self, value):
+        self.data = bool(value)
+
+    def process_formdata(self, valuelist):
+        if not valuelist or valuelist[0] in self.false_values:
+            self.data = False
+        else:
+            self.data = True
+
+    def _value(self):
+        if self.raw_data:
+            return str(self.raw_data[0])
+        return "y"
+
+
+class StringField(Field):
+    """
+    This field is the base for most of the more complicated fields, and
+    represents an ``<input type="text">``.
+    """
+
+    widget = widgets.TextInput()
+
+    def process_formdata(self, valuelist):
+        if valuelist:
+            self.data = valuelist[0]
+
+    def _value(self):
+        return str(self.data) if self.data is not None else ""
+
+
+class TextAreaField(StringField):
+    """
+    This field represents an HTML ``<textarea>`` and can be used to take
+    multi-line input.
+    """
+
+    widget = widgets.TextArea()
+
+
+class PasswordField(StringField):
+    """
+    A StringField, except renders an ``<input type="password">``.
+
+    Unlike normal fields, whatever value this field accepts is not
+    rendered back to the browser.
+    """
+
+    widget = widgets.PasswordInput()
+
+
+class FileField(Field):
+    """Renders a file upload field.
+
+    By default, the value will be the filename sent in the form data.
+    WTForms **does not** deal with frameworks' file handling capabilities.
+    A WTForms extension for a framework may replace the filename value
+    with an object representing the uploaded data.
+    """
+
+    widget = widgets.FileInput()
+
+    def _value(self):
+        # browser ignores value of file input for security
+        return False
+
+
+class MultipleFileField(FileField):
+    """A :class:`FileField` that allows choosing multiple files."""
+
+    widget = widgets.FileInput(multiple=True)
+
+    def process_formdata(self, valuelist):
+        self.data = valuelist
+
+
+class HiddenField(StringField):
+    """
+    HiddenField is a convenience for a StringField with a HiddenInput widget.
+
+    It will render as an ``<input type="hidden">`` but otherwise coerce to a string.
+    """
+
+    widget = widgets.HiddenInput()
+
+
+class SubmitField(BooleanField):
+    """
+    Represents an ``<input type="submit">``.  This allows checking if a given
+    submit button has been pressed.
+    """
+
+    widget = widgets.SubmitInput()
+
+
+class SearchField(StringField):
+    """
+    Represents an ``<input type="search">``.
+    """
+
+    widget = widgets.SearchInput()
+
+
+class TelField(StringField):
+    """
+    Represents an ``<input type="tel">``.
+    """
+
+    widget = widgets.TelInput()
+
+
+class URLField(StringField):
+    """
+    Represents an ``<input type="url">``.
+    """
+
+    widget = widgets.URLInput()
+
+
+class EmailField(StringField):
+    """
+    Represents an ``<input type="email">``.
+    """
+
+    widget = widgets.EmailInput()
+
+
+class ColorField(StringField):
+    """
+    Represents an ``<input type="color">``.
+    """
+
+    widget = widgets.ColorInput()
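A sketch of ``BooleanField`` coercion: object data goes through ``bool()``, and ``false_values`` can widen what counts as unchecked form input. Names are illustrative:

```python
from wtforms import BooleanField, Form

class SettingsForm(Form):
    # Also treat "0" and "no" from form input as unchecked.
    dark_mode = BooleanField(false_values=(False, "false", "", "0", "no"))

form = SettingsForm(data={"dark_mode": "checked"})
assert form.dark_mode.data is True   # process_data coerces via bool()

form = SettingsForm(data={"dark_mode": ""})
assert form.dark_mode.data is False
```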
diff --git a/venv/Lib/site-packages/wtforms/form.py b/venv/Lib/site-packages/wtforms/form.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6805c743aad6f263c139e1ab1fe8422c7ececa1
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/form.py
@@ -0,0 +1,329 @@
+import itertools
+from collections import OrderedDict
+
+from wtforms.meta import DefaultMeta
+from wtforms.utils import unset_value
+
+__all__ = ("BaseForm", "Form")
+
+_default_meta = DefaultMeta()
+
+
+class BaseForm:
+    """
+    Base Form Class.  Provides core behaviour like field construction,
+    validation, and data and error proxying.
+    """
+
+    def __init__(self, fields, prefix="", meta=_default_meta):
+        """
+        :param fields:
+            A dict or sequence of 2-tuples of partially-constructed fields.
+        :param prefix:
+            If provided, all fields will have their name prefixed with the
+            value.
+        :param meta:
+            A meta instance which is used for configuration and customization
+            of WTForms behaviors.
+        """
+        if prefix and prefix[-1] not in "-_;:/.":
+            prefix += "-"
+
+        self.meta = meta
+        self._prefix = prefix
+        self._fields = OrderedDict()
+
+        if hasattr(fields, "items"):
+            fields = fields.items()
+
+        translations = self.meta.get_translations(self)
+        extra_fields = []
+        if meta.csrf:
+            self._csrf = meta.build_csrf(self)
+            extra_fields.extend(self._csrf.setup_form(self))
+
+        for name, unbound_field in itertools.chain(fields, extra_fields):
+            field_name = unbound_field.name or name
+            options = dict(name=field_name, prefix=prefix, translations=translations)
+            field = meta.bind_field(self, unbound_field, options)
+            self._fields[name] = field
+
+        self.form_errors = []
+
+    def __iter__(self):
+        """Iterate form fields in creation order."""
+        return iter(self._fields.values())
+
+    def __contains__(self, name):
+        """Returns `True` if the named field is a member of this form."""
+        return name in self._fields
+
+    def __getitem__(self, name):
+        """Dict-style access to this form's fields."""
+        return self._fields[name]
+
+    def __setitem__(self, name, value):
+        """Bind a field to this form."""
+        self._fields[name] = value.bind(form=self, name=name, prefix=self._prefix)
+
+    def __delitem__(self, name):
+        """Remove a field from this form."""
+        del self._fields[name]
+
+    def populate_obj(self, obj):
+        """
+        Populates the attributes of the passed `obj` with data from the form's
+        fields.
+
+        :note: This is a destructive operation; any attribute with the same name
+               as a field will be overridden. Use with caution.
+        """
+        for name, field in self._fields.items():
+            field.populate_obj(obj, name)
+
+    def process(self, formdata=None, obj=None, data=None, extra_filters=None, **kwargs):
+        """Process default and input data with each field.
+
+        :param formdata: Input data coming from the client, usually
+            ``request.form`` or equivalent. Should provide a "multi
+            dict" interface to get a list of values for a given key,
+            such as what Werkzeug, Django, and WebOb provide.
+        :param obj: Take existing data from attributes on this object
+            matching form field attributes. Only used if ``formdata`` is
+            not passed.
+        :param data: Take existing data from keys in this dict matching
+            form field attributes. ``obj`` takes precedence if it also
+            has a matching attribute. Only used if ``formdata`` is not
+            passed.
+        :param extra_filters: A dict mapping field attribute names to
+            lists of extra filter functions to run. Extra filters run
+            after filters passed when creating the field. If the form
+            has ``filter_<fieldname>``, it is the last extra filter.
+        :param kwargs: Merged with ``data`` to allow passing existing
+            data as parameters. Overwrites any duplicate keys in
+            ``data``. Only used if ``formdata`` is not passed.
+        """
+        formdata = self.meta.wrap_formdata(self, formdata)
+
+        if data is not None:
+            kwargs = dict(data, **kwargs)
+
+        filters = extra_filters.copy() if extra_filters is not None else {}
+
+        for name, field in self._fields.items():
+            field_extra_filters = filters.get(name, [])
+
+            inline_filter = getattr(self, "filter_%s" % name, None)
+            if inline_filter is not None:
+                field_extra_filters.append(inline_filter)
+
+            if obj is not None and hasattr(obj, name):
+                data = getattr(obj, name)
+            elif name in kwargs:
+                data = kwargs[name]
+            else:
+                data = unset_value
+
+            field.process(formdata, data, extra_filters=field_extra_filters)
+
+    def validate(self, extra_validators=None):
+        """
+        Validates the form by calling `validate` on each field.
+
+        :param extra_validators:
+            If provided, is a dict mapping field names to a sequence of
+            callables which will be passed as extra validators to the field's
+            `validate` method.
+
+        Returns `True` if no errors occur.
+        """
+        success = True
+        for name, field in self._fields.items():
+            if extra_validators is not None and name in extra_validators:
+                extra = extra_validators[name]
+            else:
+                extra = tuple()
+            if not field.validate(self, extra):
+                success = False
+        return success
+
+    @property
+    def data(self):
+        return {name: f.data for name, f in self._fields.items()}
+
+    @property
+    def errors(self):
+        errors = {name: f.errors for name, f in self._fields.items() if f.errors}
+        if self.form_errors:
+            errors[None] = self.form_errors
+        return errors
+
+
+class FormMeta(type):
+    """
+    The metaclass for `Form` and any subclasses of `Form`.
+
+    `FormMeta`'s responsibility is to create the `_unbound_fields` list, which
+    is a list of `UnboundField` instances sorted by their order of
+    instantiation.  The list is created at the first instantiation of the form.
+    If any fields are added/removed from the form, the list is cleared to be
+    re-generated on the next instantiation.
+
+    Any properties which begin with an underscore or are not `UnboundField`
+    instances are ignored by the metaclass.
+    """
+
+    def __init__(cls, name, bases, attrs):
+        type.__init__(cls, name, bases, attrs)
+        cls._unbound_fields = None
+        cls._wtforms_meta = None
+
+    def __call__(cls, *args, **kwargs):
+        """
+        Construct a new `Form` instance.
+
+        Creates the `_unbound_fields` list and the internal `_wtforms_meta`
+        subclass of the class Meta in order to allow a proper inheritance
+        hierarchy.
+        """
+        if cls._unbound_fields is None:
+            fields = []
+            for name in dir(cls):
+                if not name.startswith("_"):
+                    unbound_field = getattr(cls, name)
+                    if hasattr(unbound_field, "_formfield"):
+                        fields.append((name, unbound_field))
+            # We keep the name as the second element of the sort
+            # to ensure a stable sort.
+            fields.sort(key=lambda x: (x[1].creation_counter, x[0]))
+            cls._unbound_fields = fields
+
+        # Create a subclass of the 'class Meta' using all the ancestors.
+        if cls._wtforms_meta is None:
+            bases = []
+            for mro_class in cls.__mro__:
+                if "Meta" in mro_class.__dict__:
+                    bases.append(mro_class.Meta)
+            cls._wtforms_meta = type("Meta", tuple(bases), {})
+        return type.__call__(cls, *args, **kwargs)
+
+    def __setattr__(cls, name, value):
+        """
+        Add an attribute to the class, clearing `_unbound_fields` if needed.
+        """
+        if name == "Meta":
+            cls._wtforms_meta = None
+        elif not name.startswith("_") and hasattr(value, "_formfield"):
+            cls._unbound_fields = None
+        type.__setattr__(cls, name, value)
+
+    def __delattr__(cls, name):
+        """
+        Remove an attribute from the class, clearing `_unbound_fields` if
+        needed.
+        """
+        if not name.startswith("_"):
+            cls._unbound_fields = None
+        type.__delattr__(cls, name)
+
+
+class Form(BaseForm, metaclass=FormMeta):
+    """
+    Declarative Form base class. Extends BaseForm's core behaviour allowing
+    fields to be defined on Form subclasses as class attributes.
+
+    In addition, form and instance input data are taken at construction time
+    and passed to `process()`.
+    """
+
+    Meta = DefaultMeta
+
+    def __init__(
+        self,
+        formdata=None,
+        obj=None,
+        prefix="",
+        data=None,
+        meta=None,
+        **kwargs,
+    ):
+        """
+        :param formdata: Input data coming from the client, usually
+            ``request.form`` or equivalent. Should provide a "multi
+            dict" interface to get a list of values for a given key,
+            such as what Werkzeug, Django, and WebOb provide.
+        :param obj: Take existing data from attributes on this object
+            matching form field attributes. Only used if ``formdata`` is
+            not passed.
+        :param prefix: If provided, all fields will have their name
+            prefixed with the value. This is for distinguishing multiple
+            forms on a single page. This only affects the HTML name for
+            matching input data, not the Python name for matching
+            existing data.
+        :param data: Take existing data from keys in this dict matching
+            form field attributes. ``obj`` takes precedence if it also
+            has a matching attribute. Only used if ``formdata`` is not
+            passed.
+        :param meta: A dict of attributes to override on this form's
+            :attr:`meta` instance.
+        :param extra_filters: A dict mapping field attribute names to
+            lists of extra filter functions to run. Extra filters run
+            after filters passed when creating the field. If the form
+            has ``filter_<fieldname>``, it is the last extra filter.
+        :param kwargs: Merged with ``data`` to allow passing existing
+            data as parameters. Overwrites any duplicate keys in
+            ``data``. Only used if ``formdata`` is not passed.
+        """
+        meta_obj = self._wtforms_meta()
+        if meta is not None and isinstance(meta, dict):
+            meta_obj.update_values(meta)
+        super().__init__(self._unbound_fields, meta=meta_obj, prefix=prefix)
+
+        for name, field in self._fields.items():
+            # Set all the fields to attributes so that they obscure the class
+            # attributes with the same names.
+            setattr(self, name, field)
+        self.process(formdata, obj, data=data, **kwargs)
+
+    def __setitem__(self, name, value):
+        raise TypeError("Fields may not be added to Form instances, only classes.")
+
+    def __delitem__(self, name):
+        del self._fields[name]
+        setattr(self, name, None)
+
+    def __delattr__(self, name):
+        if name in self._fields:
+            self.__delitem__(name)
+        else:
+            # This is done for idempotency: if the name refers to a field,
+            # we mask it by setting the attribute to None.
+            unbound_field = getattr(self.__class__, name, None)
+            if unbound_field is not None and hasattr(unbound_field, "_formfield"):
+                setattr(self, name, None)
+            else:
+                super().__delattr__(name)
+
+    def validate(self, extra_validators=None):
+        """Validate the form by calling ``validate`` on each field.
+        Returns ``True`` if validation passes.
+
+        If the form defines a ``validate_<fieldname>`` method, it is
+        appended as an extra validator for the field's ``validate``.
+
+        :param extra_validators: A dict mapping field names to lists of
+            extra validator methods to run. Extra validators run after
+            validators passed when creating the field. If the form has
+            ``validate_<fieldname>``, it is the last extra validator.
+        """
+        if extra_validators is not None:
+            extra = extra_validators.copy()
+        else:
+            extra = {}
+
+        for name in self._fields:
+            inline = getattr(self.__class__, f"validate_{name}", None)
+            if inline is not None:
+                extra.setdefault(name, []).append(inline)
+
+        return super().validate(extra)
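The ``validate_<fieldname>`` hook described above, sketched: the method is picked up from the class and appended as the last extra validator for that field. Names are illustrative:

```python
from wtforms import Form, StringField
from wtforms.validators import ValidationError

class RegisterForm(Form):
    username = StringField("Username")

    # Fetched from the class and called as validate_username(form, field).
    def validate_username(form, field):
        if " " in (field.data or ""):
            raise ValidationError("Usernames may not contain spaces.")

form = RegisterForm(data={"username": "ada lovelace"})
assert form.validate() is False
assert form.errors == {"username": ["Usernames may not contain spaces."]}
```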
diff --git a/venv/Lib/site-packages/wtforms/i18n.py b/venv/Lib/site-packages/wtforms/i18n.py
new file mode 100644
index 0000000000000000000000000000000000000000..73eae637108ea4db8ac5b06711061fd7a2b73a65
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/i18n.py
@@ -0,0 +1,72 @@
+import os
+
+
+def messages_path():
+    """
+    Determine the path to the 'messages' directory as best possible.
+    """
+    module_path = os.path.abspath(__file__)
+    locale_path = os.path.join(os.path.dirname(module_path), "locale")
+    if not os.path.exists(locale_path):  # pragma: no cover
+        locale_path = "/usr/share/locale"
+    return locale_path
+
+
+def get_builtin_gnu_translations(languages=None):
+    """
+    Get a gettext.GNUTranslations object pointing at the
+    included translation files.
+
+    :param languages:
+        A list of languages to try, in order. If omitted or None, then
+        gettext will try to use locale information from the environment.
+    """
+    import gettext
+
+    return gettext.translation("wtforms", messages_path(), languages)
+
+
+def get_translations(languages=None, getter=get_builtin_gnu_translations):
+    """
+    Get a WTForms translation object which wraps a low-level translations object.
+
+    :param languages:
+        A sequence of languages to try, in order.
+    :param getter:
+        A single-argument callable which returns a low-level translations object.
+    """
+    return getter(languages)
+
+
+class DefaultTranslations:
+    """
+    A WTForms translations object to wrap translations objects which use
+    ugettext/ungettext.
+    """
+
+    def __init__(self, translations):
+        self.translations = translations
+
+    def gettext(self, string):
+        return self.translations.ugettext(string)
+
+    def ngettext(self, singular, plural, n):
+        return self.translations.ungettext(singular, plural, n)
+
+
+class DummyTranslations:
+    """
+    A translations object which simply returns unmodified strings.
+
+    This is typically used when translations are disabled or if no valid
+    translations provider can be found.
+    """
+
+    def gettext(self, string):
+        return string
+
+    def ngettext(self, singular, plural, n):
+        if n == 1:
+            return singular
+
+        return plural
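`DummyTranslations` is the no-op fallback; its behaviour is trivial to confirm:

```python
from wtforms.i18n import DummyTranslations

t = DummyTranslations()
assert t.gettext("Invalid input.") == "Invalid input."
assert t.ngettext("%(num)d item", "%(num)d items", 1) == "%(num)d item"
assert t.ngettext("%(num)d item", "%(num)d items", 3) == "%(num)d items"
```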
diff --git a/venv/Lib/site-packages/wtforms/locale/README.md b/venv/Lib/site-packages/wtforms/locale/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bcdf7b70fd9d69f8b50273c0ae632abe3e3f58e6
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/README.md
@@ -0,0 +1,62 @@
+Translations
+============
+
+WTForms uses gettext to provide translations. Translations for various
+strings rendered by WTForms are created and updated by the community. If
+you notice that your locale is missing, or find a translation error,
+please submit a fix.
+
+
+Create
+------
+
+To create a translation, initialize a catalog in the new locale:
+
+```
+$ pybabel init --input-file src/wtforms/locale/wtforms.pot --output-dir src/wtforms/locale --domain wtforms --locale <your locale>
+```
+
+This will create some folders under the locale name and copy the
+template.
+
+Update
+------
+
+To add new translatable strings to the catalog:
+
+```
+$ pybabel extract --copyright-holder="WTForms Team" --project="WTForms" --version="$(python -c 'import wtforms; print(wtforms.__version__)')" --output-file src/wtforms/locale/wtforms.pot src/wtforms
+```
+
+Edit
+----
+
+After creating a translation, or to edit an existing translation, open
+the ``.po`` file. While they can be edited by hand, there are also tools
+that make working with gettext files easier.
+
+Make sure the `.po` file:
+
+- Is a valid UTF-8 text file.
+- Has the header filled out appropriately.
+- Translates all messages.
+
+
+Verify
+------
+
+After working on the catalog, verify that it compiles and produces the
+correct translations.
+
+```
+$ pybabel compile --directory src/wtforms/locale --domain wtforms --statistics
+```
+
+Try loading your translations into some sample code to verify they look
+correct.
+
+
+Submit
+------
+
+To submit your translation, create a pull request on GitHub.
diff --git a/venv/Lib/site-packages/wtforms/locale/ar/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/ar/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..723b02319c2499493acad15ece3b33e502072b78
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/ar/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/ar/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/ar/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..4ccef2ed8ee9c2c91d4c410d496dc6555ac596a8
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/ar/LC_MESSAGES/wtforms.po
@@ -0,0 +1,205 @@
+# Arabic translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2015-04-08 20:59+0100\n"
+"Last-Translator: Jalal Maqdisi <jalal.maqdisi@gmail.com>\n"
+"Language-Team: ar <LL@li.org>\n"
+"Language: ar\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=6; plural=(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n>=3 && "
+"n<=10 ? 3 : n>=11 && n<=99 ? 4 : 5)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "اسم الحقل '%s' غير صالح."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "يجب على الحقل ان يساوي %(other_name)s ."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "لا يمكن للحقل ان يحتوي على اقل من %(min)d حرف."
+msgstr[1] "لا يمكن للحقل ان يحتوي على اقل من %(min)d حروف."
+msgstr[2] ""
+msgstr[3] ""
+msgstr[4] ""
+msgstr[5] ""
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "لا يمكن للحقل ان يحتوي على اكثر من %(max)d حرف."
+msgstr[1] "لا يمكن للحقل ان يحتوي على اكثر من %(max)d حروف."
+msgstr[2] ""
+msgstr[3] ""
+msgstr[4] ""
+msgstr[5] ""
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+msgstr[2] ""
+msgstr[3] ""
+msgstr[4] ""
+msgstr[5] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "يجب على طول الحقل ان يكون ما بين %(min)d و %(max)d حرف."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "لا يجب على الرقم ان يقل عن %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "لا يجب على الرقم ان يزيد عن %(max)s. "
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "يجب على الرقم ان يكون ما بين %(min)s و %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "هذا الحقل مطلوب."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "الاملاء غير صالح."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "البريد الالكتروني غير صالح."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "پروتوكول الانترنت IP غير صالح."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "عنوان Mac غير صالح."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "عنوان الرابط غير صالح."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "عنوان UUID غير صالح."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "قيمة غير صالحة، يجب أن تكون واحدة من: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "قيمة غير صالحة، يجب أن تكون اي من: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "هذا الحقل مطلوب."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "رمز CSRF غير صالح."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "رمز CSRF مفقود."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF قد فشل."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "انتهت صلاحية رمز CSRF."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "اختيار غير صالح: لا يمكن الاجبار."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "اختيار غير صحيح."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "اختيارات غير صالحة: واحدة او اكثر من الادخالات لا يمكن اجبارها."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "القيمة '%(value)s' ليست باختيار صحيح لهذا الحقل."
+msgstr[1] "القيمة '%(value)s' ليست باختيار صحيح لهذا الحقل."
+msgstr[2] "القيمة '%(value)s' ليست باختيار صحيح لهذا الحقل."
+msgstr[3] "القيمة '%(value)s' ليست باختيار صحيح لهذا الحقل."
+msgstr[4] "القيمة '%(value)s' ليست باختيار صحيح لهذا الحقل."
+msgstr[5] "القيمة '%(value)s' ليست باختيار صحيح لهذا الحقل."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "قيمة الوقت والتاريخ غير صالحة."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "قيمة التاريخ غير صالحة."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "قيمة التاريخ غير صالحة."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "قيمة العدد الحقيقي غير صالحة."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "القيمة العشرية غير صالحة."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "القيمة العائمة غير صالحة."
diff --git a/venv/Lib/site-packages/wtforms/locale/bg/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/bg/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..33cb9fd887566926bc353dd7c74d21f2ec354865
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/bg/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/bg/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/bg/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..a88b39903beba444bab6aae39f7459c1db3f2131
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/bg/LC_MESSAGES/wtforms.po
@@ -0,0 +1,190 @@
+# Bulgarian (Bulgaria) translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2017-02-16 11:59+0100\n"
+"Last-Translator: \n"
+"Language-Team: Vladimir Kolev <me@vkolev.net>\n"
+"Language: bg_BG\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Невалидно име на поле '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Полето трябва да е еднакво с %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Полето трябва да бъде дълго поне %(min)d символ."
+msgstr[1] "Полето трябва да бъде дълго поне %(min)d символа."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Полето не моде да бъде по-дълго от %(max)d символ."
+msgstr[1] "Полето не моде да бъде по-дълго от %(max)d символа."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Дължината на полето трябва да бъде между %(min)d и %(max)d символа."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Числото трябва да е поне %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Числото трябва да е максимално %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Числото трябва да бъде между %(min)s и %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Това поле е задължително"
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Невалидно въвеждане."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Невалиден Е-мейл адрес."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Невалиден IP адрес."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Невалиден MAC адрес."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Невалиден URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Невалиден UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Невалидна стойност, трябва да бъде една от: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Невалидна стойност, не може да бъде една от: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Това поле е задължително"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Невалиден CSRF Token"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF token липсва"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF провален"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF token изтече"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Невалиден избор: не може да бъде преобразувана"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Не е валиден избор"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Невалиден(и) избор(и): една или повече въведени данни не могат да бъдат "
+"преобразувани"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' не е валиден избор за това поле"
+msgstr[1] "'%(value)s' не е валиден избор за това поле"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Не е валидна стойност за дата и време"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Не е валидна стойност за дата"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Не е валидна стойност за дата"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Не е валидна цифрова стойност"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Не е валидна десетична стойност"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Не е валидна стойност с плаваща запетая"
diff --git a/venv/Lib/site-packages/wtforms/locale/ca/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/ca/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..f34b10eb384f7d16d574af8cf4fcc3590988dc66
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/ca/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/ca/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/ca/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..0015e67d76ef913078699f487f1855e7b6f7866f
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/ca/LC_MESSAGES/wtforms.po
@@ -0,0 +1,189 @@
+# Catalan translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2014-01-16 09:58+0100\n"
+"Last-Translator: Òscar Vilaplana <oscar.vilaplana@paylogic.eu>\n"
+"Language-Team: ca <oscar.vilaplana@paylogic.eu>\n"
+"Language: ca\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Nom de camp no vàlid '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "El camp ha de ser igual a %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "El camp ha de contenir almenys %(min)d caràcter."
+msgstr[1] "El camp ha de contenir almenys %(min)d caràcters."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "El camp no pot contenir més d'%(max)d caràcter."
+msgstr[1] "El camp no pot contenir més de %(max)d caràcters."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "El camp ha de contenir entre %(min)d i %(min)d caràcters."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "El nombre ha de ser major o igual a %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "El nombre ha de ser com a màxim %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "El nombre ha d'estar entre %(min)s i %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Aquest camp és obligatori."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Valor no vàlid."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Adreça d'e-mail no vàlida."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Adreça IP no vàlida."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Adreça MAC no vàlida."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "URL no vàlida."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID no vàlid."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Valor no vàlid, ha de ser un d'entre: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Valor no vàlid, no pot ser cap d'aquests: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Aquest camp és obligatori."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Token CSRF no vàlid"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Falta el token CSRF"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "Ha fallat la comprovació de CSRF"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Token CSRF caducat"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Opció no vàlida"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Opció no acceptada"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Opció o opcions no vàlides: alguna de les entrades no s'ha pogut processar"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' no és una opció acceptada per a aquest camp"
+msgstr[1] "'%(value)s' no és una opció acceptada per a aquest camp"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Valor de data i hora no vàlid"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Valor de data no vàlid"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Valor de data no vàlid"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Valor enter no vàlid"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Valor decimal no vàlid"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Valor en coma flotant no vàlid"
diff --git a/venv/Lib/site-packages/wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..14684d379d6ec3834fe9227eb78a374a2da01d96
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..a0cfb6fd78b583d029bd30b9593052d8b8a29729
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/cs_CZ/LC_MESSAGES/wtforms.po
@@ -0,0 +1,192 @@
+# Czech (Czechia) translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0.2dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: Daniil Barabash <ghostbarik@gmail.com>\n"
+"Language-Team: cz <ghostbarik@gmail.com>\n"
+"Language: cs_CZ\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=((n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Neplatný název pole '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Hodnota pole má být stejná jako u %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Počet znaků daného pole má být minimálně %(min)d."
+msgstr[1] "Počet znaků daného pole má být minimálně %(min)d."
+msgstr[2] ""
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Počet znaků daného pole má byt maximálně %(max)d."
+msgstr[1] "Počet znaků daného pole má byt maximálně %(max)d."
+msgstr[2] ""
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+msgstr[2] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Délka pole ma být mezi %(min)d a %(max)d."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Hodnota čísla má být alespoň %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Hodnota čísla má být maximálně %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Hodnota čísla má být mezi %(min)s and %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Toto pole je povinné."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Neplatný vstup."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Neplatná emailová adresa."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Neplatná IP adresa."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Neplatná MAC adresa."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Neplatné URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Neplatné UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Neplatná hodnota, povolené hodnoty jsou: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Neplatná hodnota, nesmí být mezi: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Toto pole je povinné."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Neplatný CSRF token."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Chybí CSRF token."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "Chyba CSRF."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Hodnota CSRF tokenu."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Neplatná volba: nelze převést."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Neplatná volba."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Neplatná volba: jeden nebo více datových vstupů nemohou být převedeny."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' není platnou volbou pro dané pole."
+msgstr[1] "'%(value)s' není platnou volbou pro dané pole."
+msgstr[2] "'%(value)s' není platnou volbou pro dané pole."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Neplatná hodnota pro datum a čas."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Neplatná hodnota pro datum."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Neplatná hodnota pro datum."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Neplatná hodnota pro celé číslo."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Neplatná hodnota pro desetinné číslo."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Neplatná hodnota pro desetinné číslo."
diff --git a/venv/Lib/site-packages/wtforms/locale/cy/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/cy/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..cfd16c32778373814430d91ee39737016b06b3eb
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/cy/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/cy/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/cy/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..a13c8a06ac3a2b1ac24f3c47f76e0d5009164d12
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/cy/LC_MESSAGES/wtforms.po
@@ -0,0 +1,189 @@
+# Welsh translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2015-01-29 14:07+0000\n"
+"Last-Translator: Josh Rowe josh.rowe@digital.justice.gov.uk\n"
+"Language-Team: cy <LL@li.org>\n"
+"Language: cy\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Enw maes annilys '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Rhaid i'r maes fod yr un fath â/ag %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Mae'n rhaid i Maes fod o leiaf %(min)d cymeriad hir."
+msgstr[1] "Mae'n rhaid i Maes fod o leiaf %(min)d nod o hyd."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Ni all Maes fod yn hirach na %(max)d cymeriad."
+msgstr[1] "Ni all Maes fod yn fwy na %(max)d cymeriadau."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Rhaid i'r maes fod rhwng %(min)d a %(max)d o nodau"
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Rhaid i'r rhif fod o leiaf %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Ni chaiff y rhif fod yn fwy na %(max)s. "
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Rhaid i'r rhif fod rhwng %(min)s a %(max)s. "
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Rhaid cwblhau'r maes hwn."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Mewnbwn annilys"
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Cyfeiriad e-bost annilys"
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Cyfeiriad IP annilys"
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Cyfeiriad Mac annilys."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "URL annilys."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID annilys."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Gwerth annilys, rhaid i'r gwerth fod yn un o'r canlynol: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Gwerth annilys, ni all fod yn un o'r canlynol: %(values)s"
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Rhaid cwblhau'r maes hwn."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Tocyn CSRF annilys"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Tocyn CSRF ar goll"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF wedi methu"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Tocyn CSRF wedi dod i ben"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Dewis annilys: ddim yn bosib gweithredu"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Nid yw hwn yn ddewis dilys"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Dewis(iadau) annilys: ddim yn bosib gweithredu un neu ragor o fewnbynnau data"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "Nid yw '%(value)s' yn ddewis dilys ar gyfer y maes hwn"
+msgstr[1] "Nid yw '%(value)s' yn ddewis dilys ar gyfer y maes hwn"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Gwerth dyddiad/amser annilys"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Gwerth dyddiad annilys"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Gwerth dyddiad annilys"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Gwerth integer annilys"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Gwerth degolyn annilys"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Gwerth float annilys"
diff --git a/venv/Lib/site-packages/wtforms/locale/de/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/de/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..69fa09b68f8de83fd9a6db837e71d15f72d0740c
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/de/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/de/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/de/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..f83ed2a7bc53f7e6316308c9072e2ab677f4c3b5
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/de/LC_MESSAGES/wtforms.po
@@ -0,0 +1,190 @@
+# German translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.4\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2013-05-13 19:27+0100\n"
+"Last-Translator: Chris Buergi <chris.buergi@gmx.net>\n"
+"Language-Team: de <LL@li.org>\n"
+"Language: de\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Ungültiger Feldname '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Feld muss gleich wie %(other_name)s sein."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Feld muss mindestens %(min)d Zeichen beinhalten."
+msgstr[1] "Feld muss mindestens %(min)d Zeichen beinhalten."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Feld kann nicht länger als %(max)d Zeichen sein."
+msgstr[1] "Feld kann nicht länger als %(max)d Zeichen sein."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Feld muss zwischen %(min)d und %(max)d Zeichen beinhalten."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Zahl muss mindestens %(min)s sein."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Zahl kann höchstens %(max)s sein."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Zahl muss zwischen %(min)s und %(max)s liegen."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Dieses Feld wird benötigt."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Ungültige Eingabe."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Ungültige E-Mail-Adresse."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Ungültige IP-Adresse."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Ungültige Mac-Adresse."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Ungültige URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Ungültige UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Ungültiger Wert. Mögliche Werte: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Ungültiger Wert. Wert kann keiner von folgenden sein: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Dieses Feld wird benötigt."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Ungültiger CSRF-Code."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF-Code nicht vorhanden."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF fehlgeschlagen."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF-Code verfallen."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Ungültige Auswahl: Konnte nicht umwandeln."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Keine gültige Auswahl."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Ungültige Auswahl: Einer oder mehrere Eingaben konnten nicht umgewandelt "
+"werden."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' ist kein gültige Auswahl für dieses Feld."
+msgstr[1] "'%(value)s' ist kein gültige Auswahl für dieses Feld."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Kein gültiges Datum mit Zeit."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Kein gültiges Datum."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Kein gültiges Datum."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Keine gültige, ganze Zahl."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Keine gültige Dezimalzahl."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Keine gültige Gleitkommazahl."
diff --git a/venv/Lib/site-packages/wtforms/locale/de_CH/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/de_CH/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..9abf433cc91e596a7d9259a4da6eee0f707a1aeb
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/de_CH/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/de_CH/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/de_CH/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..a09f7e546e48ce88d39b504fe02de24da3d59f7c
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/de_CH/LC_MESSAGES/wtforms.po
@@ -0,0 +1,190 @@
+# German (Switzerland) translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.4\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2013-05-13 19:27+0100\n"
+"Last-Translator: Chris Buergi <chris.buergi@gmx.net>\n"
+"Language-Team: de_CH <LL@li.org>\n"
+"Language: de_CH\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Ungültiger Feldname '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Feld muss gleich wie %(other_name)s sein."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Feld muss mindestens %(min)d Zeichen beinhalten."
+msgstr[1] "Feld muss mindestens %(min)d Zeichen beinhalten."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Feld kann nicht länger als %(max)d Zeichen sein."
+msgstr[1] "Feld kann nicht länger als %(max)d Zeichen sein."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Feld muss zwischen %(min)d und %(max)d Zeichen beinhalten."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Zahl muss mindestens %(min)s sein."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Zahl kann höchstens %(max)s sein."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Zahl muss zwischen %(min)s und %(max)s liegen."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Dieses Feld wird benötigt."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Ungültige Eingabe."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Ungültige Email-Adresse."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Ungültige IP-Adresse."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Ungültige Mac-Adresse."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Ungültige URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Ungültige UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Ungültiger Wert. Mögliche Werte: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Ungültiger Wert. Wert kann keiner von folgenden sein: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Dieses Feld wird benötigt."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Ungültiger CSRF-Code"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF-Code nicht vorhanden"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF fehlgeschlagen"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF-Code verfallen"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Ungültige Auswahl: Konnte nicht umwandeln"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Keine gültige Auswahl"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Ungültige Auswahl: Einer oder mehrere Eingaben konnten nicht umgewandelt "
+"werden."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' ist kein gültige Auswahl für dieses Feld."
+msgstr[1] "'%(value)s' ist kein gültige Auswahl für dieses Feld."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Kein gültiges Datum mit Zeit"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Kein gültiges Datum"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Kein gültiges Datum"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Keine gültige, ganze Zahl"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Keine gültige Dezimalzahl"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Keine gültige Gleitkommazahl"
diff --git a/venv/Lib/site-packages/wtforms/locale/el/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/el/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..6c77216898e2fb529e62b9470f6ee47fa1485451
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/el/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/el/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/el/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..06d5587556b2a7023c2680600c3d3e4975b7ece2
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/el/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# Greek translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2014-04-04 20:18+0300\n"
+"Last-Translator: Daniel Dourvaris <dourvaris@gmail.com>\n"
+"Language-Team: el <LL@li.org>\n"
+"Language: el\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Λάθος όνομα πεδίου '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Το πεδίο πρέπει να είναι το ίδιο με το %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Το πεδίο πρέπει να έχει τουλάχιστον %(min)d χαρακτήρα."
+msgstr[1] "Το πεδίο πρέπει να έχει τουλάχιστον %(min)d χαρακτήρες."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Το πεδίο δεν μπορεί να έχει πάνω από %(max)d χαρακτήρα."
+msgstr[1] "Το πεδίο δεν μπορεί να έχει πάνω από %(max)d χαρακτήρες."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Το πεδίο πρέπει να είναι ανάμεσα από %(min)d και %(max)d χαρακτήρες."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Το νούμερο πρέπει να είναι τουλάχιστον %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Το νούμερο πρέπει να είναι μέγιστο %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Το νούμερο πρέπει να είναι ανάμεσα %(min)s και %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Αυτό το πεδίο είναι υποχρεωτικό"
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Λανθασμένα δεδομένα"
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Λανθασμένο email."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Λανθασμένη διεύθυνση IP."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Λανθασμένο διεύθυνση Mac."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Λανθασμένο URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Λανθασμένο UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Λάθος επιλογή, πρέπει να είναι ένα από: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Λάθος επιλογή, δεν μπορεί να είναι ένα από: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Αυτό το πεδίο είναι υποχρεωτικό"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Λάθος CSRF"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "To CSRF δεν υπάρχει"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "Αποτυχία CSRF"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Έχει λήξει το διακριτικό CSRF"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Λανθασμένη Επιλογή: δεν μετατρέπεται"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Άγνωστη επιλογή"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Λανθασμένη επιλογή(ές): κάποιες τιμές δεν μπορούσαν να μετατραπούν"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' δεν είναι έγκυρη επιλογή για αυτό το πεδίο"
+msgstr[1] "'%(value)s' δεν είναι έγκυρη επιλογή για αυτό το πεδίο"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Δεν είναι σωστή ημερομηνία/ώρα"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Δεν είναι σωστή ημερομηνία"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Δεν είναι σωστή ημερομηνία"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Δεν είναι ακέραιο νούμερο"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Δεν είναι δεκαδικό"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Δεν είναι δεκαδικό"
diff --git a/venv/Lib/site-packages/wtforms/locale/en/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/en/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..2e8a0b13d00e186354e4e2ff30757a6b8cf00ad1
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/en/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/en/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/en/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..af40f6483c5727d8a98568c2ff10f0d563610d16
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/en/LC_MESSAGES/wtforms.po
@@ -0,0 +1,169 @@
+# English translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.4\n"
+"Report-Msgid-Bugs-To: wtforms@simplecodes.com\n"
+"POT-Creation-Date: 2020-04-25 11:34-0700\n"
+"PO-Revision-Date: 2013-04-28 15:36-0700\n"
+"Last-Translator: James Crasta <james+i18n@simplecodes.com>\n"
+"Language: en\n"
+"Language-Team: en_US <james+i18n@simplecodes.com>\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:87
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Invalid field name '%s'."
+
+#: src/wtforms/validators.py:98
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Field must be equal to %(other_name)s."
+
+#: src/wtforms/validators.py:134
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Field must be at least %(min)d character long."
+msgstr[1] "Field must be at least %(min)d characters long."
+
+#: src/wtforms/validators.py:140
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Field cannot be longer than %(max)d character."
+msgstr[1] "Field cannot be longer than %(max)d characters."
+
+#: src/wtforms/validators.py:146
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "Field must be exactly %(max)d character long."
+msgstr[1] "Field must be exactly %(max)d characters long."
+
+#: src/wtforms/validators.py:152
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Field must be between %(min)d and %(max)d characters long."
+
+#: src/wtforms/validators.py:197
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Number must be at least %(min)s."
+
+#: src/wtforms/validators.py:199
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Number must be at most %(max)s."
+
+#: src/wtforms/validators.py:201
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Number must be between %(min)s and %(max)s."
+
+#: src/wtforms/validators.py:269 src/wtforms/validators.py:294
+msgid "This field is required."
+msgstr "This field is required."
+
+#: src/wtforms/validators.py:327
+msgid "Invalid input."
+msgstr "Invalid input."
+
+#: src/wtforms/validators.py:387
+msgid "Invalid email address."
+msgstr "Invalid email address."
+
+#: src/wtforms/validators.py:423
+msgid "Invalid IP address."
+msgstr "Invalid IP address."
+
+#: src/wtforms/validators.py:466
+msgid "Invalid Mac address."
+msgstr "Invalid MAC address."
+
+#: src/wtforms/validators.py:501
+msgid "Invalid URL."
+msgstr "Invalid URL."
+
+#: src/wtforms/validators.py:522
+msgid "Invalid UUID."
+msgstr "Invalid UUID."
+
+#: src/wtforms/validators.py:553
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Invalid value, must be one of: %(values)s."
+
+#: src/wtforms/validators.py:588
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Invalid value, can't be any of: %(values)s."
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Invalid CSRF Token."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF token missing."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF failed."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF token expired."
+
+#: src/wtforms/fields/core.py:534
+msgid "Invalid Choice: could not coerce."
+msgstr "Invalid Choice: could not coerce."
+
+#: src/wtforms/fields/core.py:538
+msgid "Choices cannot be None."
+msgstr "Choices cannot be None."
+
+#: src/wtforms/fields/core.py:545
+msgid "Not a valid choice."
+msgstr "Not a valid choice."
+
+#: src/wtforms/fields/core.py:573
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Invalid choice(s): one or more data inputs could not be coerced."
+
+#: src/wtforms/fields/core.py:584
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgstr "'%(value)s' is not a valid choice for this field."
+
+#: src/wtforms/fields/core.py:679 src/wtforms/fields/core.py:689
+msgid "Not a valid integer value."
+msgstr "Not a valid integer value."
+
+#: src/wtforms/fields/core.py:760
+msgid "Not a valid decimal value."
+msgstr "Not a valid decimal value."
+
+#: src/wtforms/fields/core.py:788
+msgid "Not a valid float value."
+msgstr "Not a valid float value."
+
+#: src/wtforms/fields/core.py:853
+msgid "Not a valid datetime value."
+msgstr "Not a valid datetime value."
+
+#: src/wtforms/fields/core.py:871
+msgid "Not a valid date value."
+msgstr "Not a valid date value."
+
+#: src/wtforms/fields/core.py:889
+msgid "Not a valid time value."
+msgstr "Not a valid time value."
diff --git a/venv/Lib/site-packages/wtforms/locale/es/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/es/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..6f19672dd90d91a7ee4919ec8e4ea21e29f8759b
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/es/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/es/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/es/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..df705b15ae937ccea14bac7ed43058e78efd13fd
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/es/LC_MESSAGES/wtforms.po
@@ -0,0 +1,187 @@
+# Spanish translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0\n"
+"Report-Msgid-Bugs-To: eloi.rivard@nubla.fr\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2023-10-06 21:11+0000\n"
+"Last-Translator: gallegonovato <fran-carro@hotmail.es>\n"
+"Language-Team: Spanish <https://hosted.weblate.org/projects/wtforms/wtforms/"
+"es/>\n"
+"Language: es\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=n != 1;\n"
+"X-Generator: Weblate 5.1-dev\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Nombre de campo inválido '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "El campo debe coincidir con %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "El campo debe tener al menos %(min)d caracter."
+msgstr[1] "El campo debe tener al menos %(min)d caracteres."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "El campo no puede tener más de %(max)d caracter."
+msgstr[1] "El campo no puede tener más de %(max)d caracteres."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "El campo debe ser exactamente %(max)d caracter."
+msgstr[1] "El campo debe ser exactamente %(max)d caracteres."
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "El campo debe tener entre %(min)d y %(max)d caracteres."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "El número debe ser mayor que %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "El número debe ser menor que %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "El número debe estar entre %(min)s y %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Este campo es obligatorio."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Valor inválido."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Email inválido."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Dirección IP inválida."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Dirección MAC inválida."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "URL inválida."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID inválido."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Valor inválido, debe ser uno de: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Valor inválido, no puede ser ninguno de: %(values)s."
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "Este campo no se puede editar"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr "Este campo está deshabilitado y no puede tener un valor"
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "El token CSRF es incorrecto."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "El token CSRF falta."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "Fallo CSRF."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "El token CSRF ha expirado."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Elección inválida: no se puede ajustar."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "La elección no puede ser None."
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Opción inválida."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Opción(es) inválida(s): una o más entradas de datos no pueden ser "
+"coaccionadas."
+
+#: src/wtforms/fields/choices.py:204
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "%(value)s' no es una opción válida para este campo."
+msgstr[1] "%(value)s' no son opciones válidas para este campo."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "No es un valor para la fecha y la hora válido."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "No es una fecha válida."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "No es un tiempo válido."
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "No es un valor semanal válido."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "No es un valor entero válido."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "No es un numero decimal válido."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "No es un número de punto flotante válido."
diff --git a/venv/Lib/site-packages/wtforms/locale/et/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/et/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..d85f925c9d53ff918c7177800eba08603e85bbe0
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/et/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/et/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/et/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..7d5ff0e6544b1f7b57579a44ab29e37c2c71a46e
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/et/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# Estonian translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.6dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2013-09-22 12:37+0300\n"
+"Last-Translator: Laur Mõtus <laur@povi.ee>\n"
+"Language-Team: Estonian <kde-i18n-doc@kde.org>\n"
+"Language: et\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Vigane välja nimi: '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Väli peab võrduma %(other_name)s -ga."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Väli peab olema vähemalt %(min)d tähemärgi pikkune."
+msgstr[1] "Väli peab olema vähemalt %(min)d tähemärgi pikkune."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Väli ei tohi olla üle %(max)d tähemärgi pikk."
+msgstr[1] "Väli ei tohi olla üle %(max)d tähemärgi pikk."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Välja pikkus peab olema vahemikus %(min)d -  %(max)d."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Number peab olema vähemalt %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Number tohib olla maksimaalselt %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Number peab olema vahemikus %(min)s - %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Kohustuslik väli."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Vigane sisend."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Vigane e-posti aadress."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Vigane IP aadress."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Vigane MAC aadress."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Vigane URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Vigane UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Vigane väärtus, peaks hoopis olema üks järgmistest: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Vigane väärtus, ei tohi olla ükski järgnevatest: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Kohustuslik väli."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Vigane CSRF tunnus"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Puudub CSRF tunnus"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF nurjus"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF tunnus on aegunud"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Vigane valik: ei saa teisendada"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Pole korrektne valik"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Vigane valik: ühte või rohkemat andmesisendit ei saa teisendada"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' pole sellele väljale korrektne valik"
+msgstr[1] "'%(value)s' pole sellele väljale korrektne valik"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Pole korrektne kuupäeva/kellaaja väärtus"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Pole korrektne kuupäevaline väärtus"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Pole korrektne kuupäevaline väärtus"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Pole korrektne täisarvuline väärtus"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Pole korrektne kümnendarvuline väärtus"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Pole korrektne ujukomaarvuline väärtus"
diff --git a/venv/Lib/site-packages/wtforms/locale/fa/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/fa/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..1acf6ef1dbadc33fad0998c91232b90a1c919412
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/fa/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/fa/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/fa/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..127cb56d275ad6ce7add71482b15ba04ee411708
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/fa/LC_MESSAGES/wtforms.po
@@ -0,0 +1,187 @@
+# persian translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.3\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2013-01-20 16:49+0330\n"
+"Last-Translator: mohammad Efazati <mohammad@efazati.org>\n"
+"Language-Team: fa <mohammad@efazati.org>\n"
+"Language: persian\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "فیلد '%s' اشتباه است."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "مقدار فیلد باید برابر %(other_name)s باشد."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "طول فیلد حداقل باید %(min)d حرف باشد."
+msgstr[1] "طول فیلد حداقل باید %(min)d حرف باشد."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "طول فیلد حداکثر باید %(max)d حرف باشد."
+msgstr[1] "طول فیلد حداکثر باید %(max)d حرف باشد."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "طول فیلد باید بین %(min)d تا %(max)d حرف باشد."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "عدد باید از %(min)s بزرگتر باشد."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "عدد باید از %(max)s. کوچکتر باشد."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "عدد باید بین %(min)s  تا  %(max)s باشد."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "این فیلد اجباریست."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "ورودی اشتباه است."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "آدرس پست الکترونیک اشتباه است."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "آدرس IP اشتباه است."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "آدرس MAC اشتباه است."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "آدرس وب سایت وارد شده اشتباه است."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID اشتباده است."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "ورودی اشتباه است. باید یکی از %(values)s باشد."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "ورودی اشتباه است. نباید یکی از %(values)s باشد."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "این فیلد اجباریست."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "مقدار کلید امنیتی اشتباه است."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "مقدار کلید امنیتی در درخواست شما نیست."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "کلید امنیتی با خطا مواجه شد."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "زمان استفاده از کلید امنیتی به اتمام رسیده است."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "انتخاب شما اشتباه است. ورودی قابل بررسی نیست."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "انتخاب درستی نیست."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "انتخاب شما اشتباه است. یک یا چند تا از ورودی ها قابل بررسی نیست."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s انتخاب مناسبی برای این فیلد نیست."
+msgstr[1] "'%(value)s انتخاب مناسبی برای این فیلد نیست."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "مقداری که وارد کردید، تاریخ نیست."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "مقداری که وارد کردید، تاریخ نیست."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "مقداری که وارد کردید، تاریخ نیست."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "یک عدد درست نیست."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "یک عدد اعشاری درست نیست."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "یک عدد اعشاری درست نیست."
diff --git a/venv/Lib/site-packages/wtforms/locale/fi/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/fi/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..3b62bc432727e8e57d0a63539fbaaeeaa2a8612a
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/fi/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/fi/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/fi/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..0bea31ba58ee479fc513a87d9686cd8dc4363804
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/fi/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# Finnish translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2016-06-13 15:16+0300\n"
+"Last-Translator: Teijo Mursu <zcmander+wtforms@gmail.com>\n"
+"Language-Team: fi <LL@li.org>\n"
+"Language: fi\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Epäkelpo kentän nimi '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Täytyy olla sama kuin %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Täytyy olla vähintään %(min)d merkki."
+msgstr[1] "Täytyy olla vähintään %(min)d merkkiä."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Ei voi olla pidempi kuin %(max)d merkki."
+msgstr[1] "Ei voi olla pidempi kuin %(max)d merkkiä."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Täytyy olla pidempi kuin %(min)d ja lyhyempi kuin %(max)d."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Vähintään %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Enintään %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Täytyy olla välillä %(min)s - %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Pakollinen."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Virheellinen syöte."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Virheellinen sähköpostiosoite."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Virheellinen IP-osoite"
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Virheellinen MAC-osoite."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Virheellinen URL-osoite."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Virheellinen UUID-tunnus."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Epäkelpo arvo, täytyy olla yksi seuraavista: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Epäkelpo arvo, ei voi olla yksi seuraavista: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Pakollinen."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Virheellienen CSRF-tunnus."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF-tunnus puuttuu."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF epäonnistui"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF-tunnus vanhentunut"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Virheellinen valinta: ei voida muuntaa"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Virheellinen valinta"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Virheellinen valinta: Yksi tai useampaa syötettä ei voitu muuntaa"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' ei ole kelvollinen valinta tälle kentälle"
+msgstr[1] "'%(value)s' ei ole kelvollinen valinta tälle kentälle"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Ei ole kelvollinen päivämäärä ja aika -arvo"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Ei ole kelvollinen päivämäärä-arvo"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Ei ole kelvollinen päivämäärä-arvo"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Ei ole kelvollinen kokonaisluku"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Ei ole kelvollinen desimaaliluku"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Ei ole kelvollinen liukuluku"
diff --git a/venv/Lib/site-packages/wtforms/locale/fr/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/fr/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..389e4b8e250b77cf224414a828c51b1eaba0f6bd
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/fr/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/fr/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/fr/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..8a258074537c4443a6a6cefb5a43c701263555ab
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/fr/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# French translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+# Stéphane Raimbault <stephane.raimbault@gmail.com>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.3\n"
+"Report-Msgid-Bugs-To: eloi.rivard@nubla.fr\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2023-10-06 21:11+0000\n"
+"Last-Translator: Éloi Rivard <eloi.rivard@nubla.fr>\n"
+"Language-Team: French <https://hosted.weblate.org/projects/wtforms/wtforms/"
+"fr/>\n"
+"Language: fr\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=n > 1;\n"
+"X-Generator: Weblate 5.1-dev\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Nom de champ non valide « %s »."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Le champ doit être égal à %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Le champ doit contenir au moins %(min)d caractère."
+msgstr[1] "Le champ doit contenir au moins %(min)d caractères."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Le champ ne peut pas contenir plus de %(max)d caractère."
+msgstr[1] "Le champ ne peut pas contenir plus de %(max)d caractères."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "Le champ doit contenir exactement %(max)d caractère."
+msgstr[1] "Le champ doit contenir exactement %(max)d caractères."
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr ""
+"La longueur du champ doit être comprise entre %(min)d et %(max)d caractères."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Le nombre doit être au minimum %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Le nombre doit être au maximum %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Le nombre doit être compris entre %(min)s et %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Ce champ est requis."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Saisie non valide."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Adresse électronique non valide."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Adresse IP non valide."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Adresse MAC non valide."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "URL non valide."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID non valide."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Valeur non valide, doit être parmi : %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Valeur non valide, ne peut contenir : %(values)s."
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "Ce champ n’est pas éditable"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr "Ce champ est désactivé et ne peut avoir de valeur"
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Jeton CSRF non valide."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Jeton CSRF manquant."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF a échoué."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Jeton CSRF expiré."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Choix non valide, ne peut pas être converti."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "Vous devez choisir au moins un élément."
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "N'est pas un choix valide."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Choix incorrect, une ou plusieurs saisies ne peuvent pas être converties."
+
+#: src/wtforms/fields/choices.py:204
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "« %(value)s » n'est pas un choix valide pour ce champ."
+msgstr[1] "« %(value)s » n'est pas un choix valide pour ce champ."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "N'est pas une date/heure valide."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "N'est pas une date valide."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "N'est pas un horaire valide."
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "N'est pas une semaine valide."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "N'est pas un entier valide."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "N'est pas une valeur décimale valide."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "N'est pas un flottant valide."
diff --git a/venv/Lib/site-packages/wtforms/locale/he/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/he/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..b16091cb44a80004dd3a4f30c237a8ae359e9e67
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/he/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/he/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/he/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..a96f927453a95fc148d67448256de31c0b9ce96c
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/he/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# Hebrew translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2017-04-19 00:41+0300\n"
+"Last-Translator: Tomer Levy <tmrlvi@gmail.com>\n"
+"Language-Team: he <LL@li.org>\n"
+"Language: he\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "שם שדה לא תקין: '%s'"
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "שדה חייב להיות זהה ל-%(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "שדה חייב להכיל לפחות %(min)d תו."
+msgstr[1] "שדה חייב להכיל לפחות %(min)d תווים."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "שדה אינו יכול להכיל יותר מ-%(max)d תו"
+msgstr[1] "שדה אינו יכול להכיל יותר מ-%(max)d תווים"
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "שדה חייב להכיל בין %(min)d ל-%(max)d תווים"
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "מספר חייב להיות לפחות %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "מספר חייב להיות לכל היותר %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "מספר חייב להיות בין %(min)s ו-%(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "חובה למלא שדה זה."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "קלט לא תקין."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "כתובת מייל לא תקינה."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "כתובת IP לא תקינה."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "כתובת Mac לא תקינה."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "URL לא תקין."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID לא תקין."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "ערך לא חוקי, חייב להיות מתוך: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "ערך לא חוקי, לא יכול להיות מתוך: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "חובה למלא שדה זה."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "מזהה CSRF לא תקין"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "מזהה CSRF חסר"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF נכשל"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "מזהה CSRF פג תוקף"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "לא בחירה חוקית"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "בחירה\\ות לא תקינה: לא ניתן לכפות סוג על קלט אחד או יותר"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' אינו בחירה תקינה עבור השדה הזה"
+msgstr[1] "'%(value)s' אינו בחירה תקינה עבור השדה הזה"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "לא ערך תאריך-זמן תקין"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "לא תאריך תקין"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "לא תאריך תקין"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "לא מספר שלם תקין"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "לא מספר עשרוני"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "לא מספר מסוג float"
diff --git a/venv/Lib/site-packages/wtforms/locale/hu/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/hu/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..40ddc569a72e67041682ace4bc34d7422f1b5cb9
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/hu/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/hu/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/hu/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..577d70334d1e7aadd5e225202590ed2c6f69ad4e
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/hu/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# Hungarian translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2016-09-27 13:09-0400\n"
+"Last-Translator: Zoltan Fedor <zoltan.0.fedor@gmail.com>\n"
+"Language-Team: Hungarian <>\n"
+"Language: hu\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Érvénytelen mező '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "A mező értéke %(other_name)s kell hogy legyen."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "A mező legalább %(min)d karakter hosszú kell hogy legyen."
+msgstr[1] ""
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "A mező nem lehet hosszabb mint %(max)d karakter."
+msgstr[1] ""
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "A mező hossza %(min)d és %(max)d karakter között kell hogy legyen."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "A szám %(min)s vagy nagyobb kell hogy legyen."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "A szám maximum %(max)s lehet."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "A szám %(min)s és %(max)s között kell hogy legyen."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Ez a mező kötelező."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Érvénytelen adat."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Érvénytelen email cím."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Érvénytelen IP cím."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Érvénytelen Mac cím."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Érvénytelen URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Érvénytelen UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Érvénytelen adat, a következőek egyike kell hogy legyen: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Érvénytelen adat, a következőek egyike sem lehet: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Ez a mező kötelező."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Érvénytelen CSRF token"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Hiányzó CSRF token"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF hiba"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Lejárt CSRF token"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Érvénytelen választás: adat nem használható"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Érvénytelen érték"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Érvénytelen választás: egy vagy több adat elem nem használható"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' egy érvénytelen érték ebben a mezőben"
+msgstr[1] "'%(value)s' egy érvénytelen érték ebben a mezőben"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Érvénytelen adat, nem dátum/időpont"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Érvénytelen adat, nem dátum"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Érvénytelen adat, nem dátum"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Érvénytelen adat, nem egész szám"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Érvénytelen adat, nem decimális szám"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Érvénytelen adat, nem lebegőpontos szám"
diff --git a/venv/Lib/site-packages/wtforms/locale/it/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/it/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..2095cfd6cad93a92733147b38e7da8ccbdc05d9f
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/it/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/it/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/it/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..8d2c7835fe8376f9d18af6771a926141d412a6db
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/it/LC_MESSAGES/wtforms.po
@@ -0,0 +1,190 @@
+# Italian translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2017-03-01 11:53+0100\n"
+"Last-Translator: \n"
+"Language-Team: \n"
+"Language: it\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Nome del campo non valido '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Il valore deve essere uguale a %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Il valore deve essere lungo almeno %(min)d carattere."
+msgstr[1] "Il valore deve essere lungo almeno %(min)d caratteri."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Il valore non può essere più lungo di %(max)d carattere."
+msgstr[1] "Il valore non può essere più lungo di %(max)d caratteri."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr ""
+"La lunghezza del valore deve essere compresa tra %(min)d e %(max)d caratteri."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Il numero deve essere maggiore o uguale a %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Il numero deve essere minore o uguale a %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Il numero deve essere compreso tra %(min)s e %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Questo campo è obbligatorio."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Valore non valido."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Indirizzo e-mail non valido."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Indirizzo IP non valido."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Indirizzo Mac non valido."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "URL non valido."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID non valido."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Valore non valido, deve essere uno tra: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Valore non valido, non può essere nessuno tra: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Questo campo è obbligatorio."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Token CSRF non valido"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Token CSRF mancante"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF fallito"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Token CSRF scaduto"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Opzione non valida: valore non convertibile"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Non è una opzione valida"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Opzione(i) non valida(e): uno o pù valori non possono essere convertiti"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' non è una opzione valida per questo campo"
+msgstr[1] "'%(value)s' non è una opzione valida per questo campo"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Il valore non corrisponde ad una data e un orario validi"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Non è una data valida"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Non è una data valida"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Non è una valore intero valido"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Non è un valore decimale valido"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Non è un valore in virgola mobile valido"
diff --git a/venv/Lib/site-packages/wtforms/locale/ja/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/ja/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..91df7aabbf6a24f5825363ae93eb147c43450e2a
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/ja/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/ja/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/ja/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..2e62ba2535455c89782edbfb10d21e56ca79370e
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/ja/LC_MESSAGES/wtforms.po
@@ -0,0 +1,184 @@
+# Japanese translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2015-07-06 23:49+0900\n"
+"Last-Translator: yusuke furukawa <littlefive.jp@gmail.com>\n"
+"Language-Team: ja <littlefive.jp@gmail.com>\n"
+"Language: ja\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "%s は無効なフィールド名です。"
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "フィールドは %(other_name)s でなければいけません。"
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "フィールドは %(min)d 文字以上でなければなりません。"
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "フィールドは %(max)d 文字を超えることはできません。"
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "フィールドは %(min)d 以上, %(max)d 文字以内でなければなりません。"
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "数値は %(min)s 以上でなければなりません。"
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "数値は 最高でも %(max)s でなければなりません。"
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "数値は %(min)s 以上, %(max)s 以下でなければいけません。"
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "このフィールドは必須です。"
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "無効な入力です。"
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "無効なメールアドレスです。"
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "無効なIPアドレスです。"
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "無効なMacアドレスです。"
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "無効なURLです。"
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "無効なUUIDです。"
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "無効な値です, 次のうちの1つでなければいけません: %(values)s。"
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "無効な値です、次に含まれるものは使えません: %(values)s。"
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "このフィールドは必須です。"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "不正なCSRFトークンです。"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRFトークンがありません。"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF認証に失敗しました。"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRFトークンの期限が切れました。"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "無効な選択: 型変換できません。"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "選択肢が正しくありません。"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "無効な選択: 1つ以上の値を型変換できません。"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' はこのフィールドでは有効ではありません。"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "無効な時間型です。"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "無効な日付型です。"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "無効な時間型です。"
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "無効な日付型です。"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "無効な整数です。"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "無効な少数です。"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "無効なfloat値です。"
diff --git a/venv/Lib/site-packages/wtforms/locale/ko/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/ko/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..340c9f7970517f910a2962190ffeb5da084d2240
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/ko/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/ko/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/ko/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..a8b301cb5bcfcc64e142f089e44221f077076f5d
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/ko/LC_MESSAGES/wtforms.po
@@ -0,0 +1,184 @@
+# Korean translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.3\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2013-02-15 00:12+0900\n"
+"Last-Translator: GunWoo Choi <6566gun@gmail.com>\n"
+"Language-Team: ko_KR <6566gun@gmail.com>\n"
+"Language: ko\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "'%s'는 올바르지 않은 항목 이름입니다."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "이 항목은 %(other_name)s 항목과 같아야 합니다."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "이 항목은 최소 %(min)d자 이상이어야 합니다."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "이 항목은 %(max)d자 보다 많을 수 없습니다."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "이 항목은 정확히 %(max)d자이어야 합니다"
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "이 항목은 최소 %(min)d자 이상, %(max)d자 이하이어야 합니다."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "이 값은 최소 %(min)s 이상이어야 합니다."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "이 값은 %(max)s보다 클 수 없습니다."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "이 값은 %(min)s 이상, %(max)s 이하이어야 합니다."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "이 항목은 필수입니다."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "올바르지 않은 입력값입니다."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "올바르지 않은 이메일 주소입니다."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "올바르지 않은 IP 주소입니다."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "올바르지 않은 Mac주소입니다."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "올바르지 않은 URL입니다."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "올바르지 않은 UUID입니다."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "올바르지 않은 값입니다, 다음 중 하나이어야 합니다: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "올바르지 않은 값입니다, 다음 값은 사용할 수 없습니다: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "이 항목은 필수입니다."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "올바르지 않은 CSRF 토큰입니다."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF 토큰을 찾을 수 없습니다."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF 인증에 실패하였습니다."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF 토큰이 만료되었습니다."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "올바르지 않은 선택값입니다: 변환할 수 없습니다."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "선택값이 None일 수 없습니다."
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "올바르지 않은 선택값입니다."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "올바르지 않은 선택값입니다: 한개 이상의 값을 변화할 수 없습니다."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s'는 이 항목에 유효하지 않은 선택 값입니다."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "올바르지 않은 시간 값입니다."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "올바르지 않은 날짜 값입니다."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "올바르지 않은 시간 값입니다."
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "올바르지 않은 날짜 값입니다."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "올바르지 않은 정수 값입니다."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "올바르지 않은 숫자 값입니다."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "올바르지 않은 float 값입니다."
diff --git a/venv/Lib/site-packages/wtforms/locale/nb/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/nb/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..bb7c89e273d5b20342caf4eab8a0cc525ffb613f
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/nb/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/nb/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/nb/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..2f53eb7d136a4c0e95b7fef080bfab214544eebe
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/nb/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# Norwegian Bokmål translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2014-05-05 16:18+0100\n"
+"Last-Translator: Frode Danielsen <frode@e5r.no>\n"
+"Language-Team: nb <LL@li.org>\n"
+"Language: nb\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Ugyldig feltnavn '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Feltet må være lik som %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Feltet må være minst %(min)d tegn langt."
+msgstr[1] "Feltet må være minst %(min)d tegn langt."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Feltet kan ikke være lenger enn %(max)d tegn."
+msgstr[1] "Feltet kan ikke være lenger enn %(max)d tegn."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Feltet må være mellom %(min)d og %(max)d tegn langt."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Tall må være minst %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Tall må være maks %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Tall må være mellom %(min)s og %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Dette feltet er påkrevd."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Ugyldig verdi."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Ugyldig e-postadresse."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Ugyldig IP-adresse."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Ugyldig MAC-adresse."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Ugyldig URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Ugyldig UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Ugyldig verdi, den må være en av følgende: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Ugyldig verdi, den kan ikke være en av følgende: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Dette feltet er påkrevd."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Ugyldig CSRF-pollett"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Manglende CSRF-pollett"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF-sjekk feilet"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Utløpt CSRF-pollett"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Ugyldig valg: Kunne ikke oversette"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Ikke et gyldig valg"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Ugyldig(e) valg: En eller flere dataverdier kunne ikke oversettes"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' er ikke et gyldig valg for dette feltet"
+msgstr[1] "'%(value)s' er ikke et gyldig valg for dette feltet"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Ikke en gyldig dato- og tidsverdi"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Ikke en gyldig datoverdi"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Ikke en gyldig datoverdi"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Ikke en gyldig heltallsverdi"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Ikke en gyldig desimalverdi"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Ikke en gyldig flyttallsverdi"
diff --git a/venv/Lib/site-packages/wtforms/locale/nl/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/nl/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..c80e7635ef60d26fa5f59ee7aa6b8bcfc94ceb2a
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/nl/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/nl/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/nl/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..8a94ca61a817028719b78510a82e69f4bbc81ea5
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/nl/LC_MESSAGES/wtforms.po
@@ -0,0 +1,189 @@
+# Dutch translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2014-01-16 09:56+0100\n"
+"Last-Translator: Dirk Zittersteyn <dirk.zittersteyn@paylogic.eu>\n"
+"Language-Team: nl <dirk.zittersteyn@paylogic.eu>\n"
+"Language: nl\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Ongeldige naam voor veld '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Veld moet gelijk zijn aan %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Veld moet minstens %(min)d karakter lang zijn."
+msgstr[1] "Veld moet minstens %(min)d karakters lang zijn."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Veld mag niet langer zijn dan %(max)d karakter."
+msgstr[1] "Veld mag niet langer zijn dan %(max)d karakters."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "Veld moet exact %(max)d karakter lang zijn."
+msgstr[1] "Veld moet exact %(max)d karakters lang zijn."
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Veld moet tussen %(min)d en %(max)d karakters lang zijn."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Getal moet minstens %(min)s zijn."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Nummer mag maximaal %(max)s zijn."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Nummer moet tussen %(min)s en %(max)s liggen."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Dit veld is vereist."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Ongeldige invoer."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Ongeldig e-mailadres."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Ongeldig IP-adres."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Ongeldig MAC-adres."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Ongeldige URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Ongeldige UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Ongeldige waarde, moet een waarde zijn uit: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Ongeldige waarde, kan geen waarde zijn uit: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Dit veld is vereist."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Ongeldig CSRF-token."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF-token ontbreekt."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF is gefaald."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF-token is verlopen."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Ongeldige keuze: kon niet omgezet worden."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "Keuzes mogen niet None zijn."
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Ongeldige keuze."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Ongeldige keuze(s): een of meer van de invoeren kon niet omgezet worden."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' is een ongeldige keuze voor dit veld."
+msgstr[1] "'%(value)s' is een ongeldige keuze voor dit veld."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Ongeldige datum/tijd."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Ongeldige datum."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "Ongeldige waarde."
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Ongeldige datum."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Ongeldig getal."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Ongeldige decimale waarde."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Ongeldige float-waarde."
diff --git a/venv/Lib/site-packages/wtforms/locale/pl/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/pl/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..72da9e205feaeb9d1a736a72c0c7c1167d99706f
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/pl/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/pl/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/pl/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..572d5e0cd7b3a0526e7636dc2dc2b0fa4db73a88
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/pl/LC_MESSAGES/wtforms.po
@@ -0,0 +1,194 @@
+# Polish translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2012-05-05 23:20+0200\n"
+"Last-Translator: Aleksander Nitecki <ixendr@itogi.re>\n"
+"Language-Team: pl <wolanskim@gmail.com>\n"
+"Language: pl\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 "
+"|| n%100>=20) ? 1 : 2)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Nieprawidłowa nazwa pola '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Wartość pola musi być równa %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Pole musi mieć przynajmniej %(min)d znak."
+msgstr[1] "Pole musi mieć przynajmniej %(min)d znaki."
+msgstr[2] "Pole musi mieć przynajmniej %(min)d znaków."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Wartość w polu nie może mieć więcej niż %(max)d znak."
+msgstr[1] "Wartość w polu nie może mieć więcej niż %(max)d znaki."
+msgstr[2] "Wartość w polu nie może mieć więcej niż %(max)d znaków."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+msgstr[2] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Wartość musi być długa na od %(min)d do %(max)d znaków."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Liczba musi być większa lub równa %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Liczba musi być mniejsza lub równa %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Liczba musi być z zakresu %(min)s i %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "To pole jest wymagane."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Nieprawidłowa wartość."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Nieprawidłowy adres e-mail."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Nieprawidłowy adres IP."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Nieprawidłowy adres Mac."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Nieprawidłowy URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Nieprawidłowy UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Wartość musi być jedną z: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Wartość nie może być żadną z: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "To pole jest wymagane."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Nieprawidłowy token CSRF"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Brak tokena CSRF"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "błąd CSRF"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Wygasł token CSRF"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Nieprawidłowy wybór: nie można skonwertować"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Nieprawidłowy wybór"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Nieprawidłowy wybór: nie można skonwertować przynajmniej jednej wartości"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' nie jest poprawnym wyborem dla tego pola"
+msgstr[1] "'%(value)s' nie jest poprawnym wyborem dla tego pola"
+msgstr[2] "'%(value)s' nie jest poprawnym wyborem dla tego pola"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Nieprawidłowa data i czas"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Nieprawidłowa data"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Nieprawidłowa data"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Nieprawidłowa liczba całkowita"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Nieprawidłowa liczba dziesiętna"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Nieprawidłowa liczba zmiennoprzecinkowa"
diff --git a/venv/Lib/site-packages/wtforms/locale/pt/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/pt/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..dc1e902db69e8f94a14e8d48f1497c16deebc47e
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/pt/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/pt/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/pt/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..aed1d2e61941351dc7eda7bc6f4f6dc59709b946
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/pt/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# Portuguese translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2014-01-16 10:36+0100\n"
+"Last-Translator: Rui Pacheco <rui.pacheco@gmail.com>\n"
+"Language-Team: pt <rui.pacheco@gmail.com>\n"
+"Language: pt\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Nome do campo inválido '%s'"
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "O campo deve ser igual a %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "O campo deve ter pelo menos %(min)d caracteres."
+msgstr[1] "Os campos devem ter pelo menos %(min)d caracteres."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "O campo não pode ter mais do que %(max)d caracteres."
+msgstr[1] "Os campos não podem ter mais do que %(max)d caracteres."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "O campo deve ter entre %(min)d e %(max)d caracteres."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "O valor não pode ser menos do que %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "O valor não pode ser mais do que %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "O valor tem que ser entre %(min)s e %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Campo obrigatório."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Entrada inválida."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Email inválido."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "IP inválido."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Mac address inválido."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "URL inválido."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID inválido."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Valor inválido, deve ser um dos seguintes: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Valor inválido, não deve ser um dos seguintes: %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Campo obrigatório."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Token CSRF inválido."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Falta o token CSRF."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF falhou."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Token CSRF expirado."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Escolha inválida: não é possível calcular."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Escolha inválida."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Escolha(s) inválida(s): não é possível calcular alguns dos valores."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "‘%(value)s’ não é uma escolha válida para este campo."
+msgstr[1] "‘%(value)s’ não é uma escolha válida para este campo."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "O valor temporal não é válido."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "A data não é válida."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "A data não é válida."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "O valor inteiro não é válido."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "O valor decimal não é válido."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "O valor com vírgula flutuante não é válido. "
diff --git a/venv/Lib/site-packages/wtforms/locale/ro/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/ro/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..06ea164f7dbc1d503e14549e8c1baef30321b69b
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/ro/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/ro/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/ro/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..46b8c9bcebe8e0a347e1a8a823d19f5fe4039055
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/ro/LC_MESSAGES/wtforms.po
@@ -0,0 +1,191 @@
+# Translations template for WTForms.
+# Copyright (C) 2023 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 3.0.1\n"
+"Report-Msgid-Bugs-To: eloi.rivard@nubla.fr\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2023-10-07 06:11+0000\n"
+"Last-Translator: Victor Buzdugan <buzdugan.victor@icloud.com>\n"
+"Language-Team: Romanian <https://hosted.weblate.org/projects/wtforms/wtforms/"
+"ro/>\n"
+"Language: ro\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < "
+"20)) ? 1 : 2;\n"
+"X-Generator: Weblate 5.1-dev\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Numele câmpului este invalid '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Câmpul trebuie să fie egal cu %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Câmpul trebuie să aibă minim %(min)d caracter."
+msgstr[1] "Câmpul trebuie să aibă minim %(min)d caractere."
+msgstr[2] "Câmpul trebuie să aibă minim %(min)d de caractere."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Câmpul nu poate fi mai mare de %(max)d caracter."
+msgstr[1] "Câmpul nu poate fi mai mare de %(max)d caractere."
+msgstr[2] "Câmpul nu poate fi mai mare de %(max)d de caractere."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "Câmpul trebuie să aibă exact %(max)d caracter."
+msgstr[1] "Câmpul trebuie să aibă exact %(max)d caractere."
+msgstr[2] "Câmpul trebuie să aibă exact %(max)d de caractere."
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Câmpul trebuie să aibă între %(min)d și %(max)d caractere."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Numărul trebuie să fie cel puțin %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Numărul trebuie să fie cel mult %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Numărul trebuie să fie între %(min)s și %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Acest câmp este obligatoriu."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Valoarea introdusă este invalidă."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Adresa email este invalidă."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Adresa IP este invalidă."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Adresa Mac este invalidă."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "URL invalid."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "UUID invalid."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Valoare invalidă, trebuie să fie una din: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Valoare invalidă, nu trebuie să fie una din: %(values)s."
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "Câmpul nu poate fi editat"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr "Câmpul este dezactivat și nu poate conține o valoare"
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Token-ul CSRF este invalid."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Lipsește token-ul CSRF."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "Validarea CSRF a eșuat."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "Token-ul CSRF a expirat."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Selecție invalidă: valoarea nu a putut fi transformată."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "Selecția nu poate fi None."
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Selecție invalidă."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Selecție(ii) invalidă: una sau mai multe valori nu au putut fi transformate."
+
+#: src/wtforms/fields/choices.py:204
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' nu este o selecție validă pentru acest câmp."
+msgstr[1] "'%(value)s' nu sunt selecții valide pentru acest câmp."
+msgstr[2] "'%(value)s' nu sunt selecții valide pentru acest câmp."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Valoare dată-timp invalidă."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Valoarea pentru dată este invalidă."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "Valoarea pentru timp este invalidă."
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "Valoarea pentru săptămână este invalidă."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Număr întreg invalid."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Număr zecimal invalid."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Număr cu virgulă invalid."
diff --git a/venv/Lib/site-packages/wtforms/locale/ru/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/ru/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..27cb64efb003cf422021cf488fddf444d6d1a211
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/ru/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/ru/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/ru/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..bb87e8ee8304ead804cfffc19702b85077ac6a5e
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/ru/LC_MESSAGES/wtforms.po
@@ -0,0 +1,195 @@
+# Russian translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.3\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2012-08-01 10:23+0400\n"
+"Last-Translator: Yuriy Khomyakov <_yurka_@inbox.ru>\n"
+"Language-Team: ru <LL@li.org>\n"
+"Language: ru\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && "
+"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Неправильное имя поля '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Поле должно совпадать с %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Значение должно содержать не менее %(min)d символа."
+msgstr[1] "Значение должно содержать не менее %(min)d символов."
+msgstr[2] "Значение должно содержать не менее %(min)d символов."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Значение не должно содержать более %(max)d символа."
+msgstr[1] "Значение не должно содержать более %(max)d символов."
+msgstr[2] "Значение не должно содержать более %(max)d символов."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "Значение должно быть точно %(max)d символа."
+msgstr[1] "Значение должно быть %(max)d символов."
+msgstr[2] "Значение должно быть %(max)d символов."
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Значение должно содержать от %(min)d до %(max)d символов."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Число должно быть больше %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Число должно быть меньше %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Значение должно быть между %(min)s и %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Обязательное поле."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Некорректный ввод."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Неверный адрес электронной почты."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Неверный IP адрес."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Неверный MAC адрес."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Неверный URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Неверный UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Неверное значение, должно быть одним из %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Неверное значение, не должно быть одним из %(values)s."
+
+#: src/wtforms/validators.py:698
+#, fuzzy
+#| msgid "This field is required."
+msgid "This field cannot be edited"
+msgstr "Обязательное поле."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Неверный CSRF токен."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF токен отсутствует."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "Ошибка CSRF."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF токен просрочен."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Неверный вариант: невозможно преобразовать."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "Выбор не может быть None"
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Неверный вариант."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+"Неверный вариант(варианты): одно или несколько значений невозможно "
+"преобразовать."
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' - неверный вариант для этого поля."
+msgstr[1] "'%(value)s' - неверный вариант для этого поля."
+msgstr[2] "'%(value)s' - неверный вариант для этого поля."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Неверное значение даты и времени."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Неверное значение даты."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "Неверное значение времени."
+
+#: src/wtforms/fields/datetime.py:148
+#, fuzzy
+#| msgid "Not a valid date value."
+msgid "Not a valid week value."
+msgstr "Неверное значение даты."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Неверное целое число."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Неверное десятичное число."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Неверное десятичное число."
diff --git a/venv/Lib/site-packages/wtforms/locale/sk/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/sk/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..836e6f3f2d8ff0bad180dc9da226f54e669e611c
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/sk/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/sk/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/sk/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..8a2be88b873a6173bdee5b4bb5d5e8b263ca23f9
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/sk/LC_MESSAGES/wtforms.po
@@ -0,0 +1,189 @@
+# Slovak translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: eloi.rivard@nubla.fr\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2023-10-06 21:11+0000\n"
+"Last-Translator: Milan Šalka <salka.milan@googlemail.com>\n"
+"Language-Team: Slovak <https://hosted.weblate.org/projects/wtforms/wtforms/"
+"sk/>\n"
+"Language: sk\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
+"X-Generator: Weblate 5.1-dev\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Neplatný názov poľa '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Hodnota poľa musí byť rovnaká ako v prípade %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Pole musí obsahovať aspoň %(min)d znak."
+msgstr[1] "Pole musí obsahovať aspoň %(min)d znaky."
+msgstr[2] "Pole musí obsahovať aspoň %(min)d znakov."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Pole nesmie byť dlhšie ako %(max)d znak."
+msgstr[1] "Pole nesmie byť dlhšie ako %(max)d znaky."
+msgstr[2] "Pole nesmie byť dlhšie ako %(max)d znakov."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "Pole musí byť presne %(max)d znak dlhé."
+msgstr[1] "Pole musí byť presne %(max)d znaky dlhé."
+msgstr[2] "Pole musí byť presne %(max)d znakov dlhé."
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Počet znakov v poli musí byť medzi %(min)d a %(max)d."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Číslo musí byť aspoň %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Číslo musí byť najviac %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Číslo musí byť medzi %(min)s a %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Toto pole je povinné."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Neplatný vstup."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Neplatná emailová adresa."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Neplatná IP adresa."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Neplatná MAC adresa."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Neplatné URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Neplatné UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Neplatná hodnota, povolené hodnoty sú: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Neplatná hodnota, nesmie byť jedna z: %(values)s."
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "Toto pole nie je možné upravovať"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr "Toto pole je zakázané a nemôže mať hodnotu"
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Neplatný CSRF token."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "Chýba CSRF token."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "Chyba CSRF."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF token expiroval."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Neplatná voľba: hodnotu sa nepodarilo previesť."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "Výbery nemôžu byť žiadne."
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Neplatná voľba."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Neplatná voľba: jeden alebo viacero vstupov sa nepodarilo previesť."
+
+#: src/wtforms/fields/choices.py:204
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' nieje platnou voľbou pre toto pole."
+msgstr[1] "'%(value)s' niesú platnou voľbou pre toto pole."
+msgstr[2] "'%(value)s' nieje platnou voľbou pre toto pole."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Neplatná hodnota pre dátum a čas."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Neplatná hodnota pre dátum."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "Nie je platná hodnota času."
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "Nie je platný týždeň."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Neplatná hodnota pre celé číslo."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Nie je platnou desatinnou hodnotou."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Neplatná hodnota pre desatinné číslo."
diff --git a/venv/Lib/site-packages/wtforms/locale/sv/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/sv/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..fad84e287270f17a8c23f68161a250ed8ffd3cfd
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/sv/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/sv/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/sv/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..d31bf0dbcd33d6a4b035daa7e3cc2417a6a59dfd
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/sv/LC_MESSAGES/wtforms.po
@@ -0,0 +1,185 @@
+# Swedish translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: eloi.rivard@nubla.fr\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2023-11-25 05:13+0000\n"
+"Last-Translator: bittin1ddc447d824349b2 <bittin@reimu.nl>\n"
+"Language-Team: Swedish <https://hosted.weblate.org/projects/wtforms/wtforms/"
+"sv/>\n"
+"Language: sv\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=n != 1;\n"
+"X-Generator: Weblate 5.2.1-rc\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Felaktigt fältnamn '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Fältvärdet måste matcha %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Fältet måste vara minst %(min)d tecken långt."
+msgstr[1] "Fältet måste vara minst %(min)d tecken långt."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Fältet får inte vara längre än %(max)d tecken."
+msgstr[1] "Fältet får inte vara längre än %(max)d tecken."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "Fält måste vara exakt %(max)d tecken långt."
+msgstr[1] "Fältet måste vara exakt %(max)d tecken långt."
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Fältet måste vara mellan %(min)d och %(max)d tecken långt."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Numret får inte vara mindre än %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Numret får inte vara högre än %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Numret måste vara mellan %(min)s och %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Det här fältet är obligatoriskt."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Felaktig indata."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Felaktig epost-adress."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Felaktig IP-adress."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Felaktig MAC-adress."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Felaktig URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Felaktig UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Felaktigt värde, måste vara ett av: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Felaktigt värde, får inte vara något av: %(values)s."
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "Detta fält kan inte redigeras"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr "Det här fältet är inaktiverat och kan inte ha ett värde"
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Felaktigt CSRF-token"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF-token saknas"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF misslyckades"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF-token utdaterat"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Felaktigt val; kunde inte ceorce:a"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "Val kan inte vara Inga."
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Inte ett giltigt val"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Felaktigt val; ett eller flera inputfält kunde inte coerca:s"
+
+#: src/wtforms/fields/choices.py:204
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' är inte ett giltigt val för detta fält."
+msgstr[1] "'%(value)s' är inte giltiga val för detta fält."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Inte ett giltigt datum-/tidsvärde"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Inte ett giltigt datum"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "Inte ett giltigt tidsvärde."
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "Inte ett giltigt veckovärde."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Inte ett giltigt heltal"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Inte ett giltigt decimaltal"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Inte ett giltigt flyttal"
diff --git a/venv/Lib/site-packages/wtforms/locale/tr/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/tr/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..b73ce7cb473d12d7eff6921ff7426ffba6c242dc
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/tr/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/tr/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/tr/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..29091a3b878f906e99929e49dca336e2ed9abd9e
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/tr/LC_MESSAGES/wtforms.po
@@ -0,0 +1,185 @@
+# Turkish translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.4\n"
+"Report-Msgid-Bugs-To: eloi.rivard@nubla.fr\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2023-11-05 19:46+0000\n"
+"Last-Translator: Oğuz Ersen <oguz@ersen.moe>\n"
+"Language-Team: Turkish <https://hosted.weblate.org/projects/wtforms/wtforms/"
+"tr/>\n"
+"Language: tr\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=n != 1;\n"
+"X-Generator: Weblate 5.2-dev\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Geçersiz alan adı '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Alan %(other_name)s ile eşit olmalı."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Alan en az %(min)d karakter uzunluğunda olmalı."
+msgstr[1] "Alan en az %(min)d karakter uzunluğunda olmalı."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Alan %(max)d karakterden uzun olamaz."
+msgstr[1] "Alan %(max)d karakterden uzun olamaz."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "Alan tam olarak %(max)d karakter uzunluğunda olmalı."
+msgstr[1] "Alan tam olarak %(max)d karakter uzunluğunda olmalı."
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Alanın uzunluğu %(min)d ile %(max)d karakter arasında olmalıdır."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Sayı en az %(min)s olmalıdır."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Sayı en fazla %(max)s olmalıdır."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Sayı %(min)s ile %(max)s arasında olmalıdır."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Bu alan zorunludur."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Geçersiz girdi."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Geçersiz e-posta adresi."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Geçersiz IP adresi."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Geçersiz Mac adresi."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Geçersiz URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Geçersiz UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Geçersiz değer, değerlerden biri olmalı: %(values)s ."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Geçersiz değer, değerlerden biri olamaz: %(values)s."
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "Bu alan düzenlenemez"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr "Bu alan devre dışıdır ve bir değere sahip olamaz"
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Geçersiz CSRF Anahtarı."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF anahtarı gerekli."
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF hatalı."
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF anahtarının süresi doldu."
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Geçersiz seçim: tip uyuşmazlığı."
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "Seçimler Hiçbiri olamaz."
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Geçerli bir seçenek değil."
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Geçersiz seçenek: bir yada daha fazla tip uyuşmazlığı."
+
+#: src/wtforms/fields/choices.py:204
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' bu alan için geçerli bir seçim değil."
+msgstr[1] "'%(value)s' bu alan için geçerli bir seçim değil."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Geçerli bir tarih-saat değeri değil."
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Geçerli bir tarih değeri değil."
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "Geçerli bir zaman değeri değil."
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "Geçerli bir hafta değeri değil."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Geçerli bir tam sayı değeri değil."
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Geçerli bir ondalık sayı değeri değil."
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Geçerli bir ondalık sayı değeri değil."
diff --git a/venv/Lib/site-packages/wtforms/locale/uk/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/uk/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..d6e6de93cc615663aff0444cc3fedb713826f644
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/uk/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/uk/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/uk/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..35a116a64c5405ae976973cf3643b1b2f44e71b6
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/uk/LC_MESSAGES/wtforms.po
@@ -0,0 +1,193 @@
+# Ukrainian translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 2.0dev\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2014-01-16 10:04+0100\n"
+"Last-Translator: Oleg Pidsadnyi <oleg.pidsadnyi@paylogic.eu>\n"
+"Language-Team: uk <oleg.pidsadnyi@paylogic.eu>\n"
+"Language: uk\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && "
+"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "Невірне ім'я поля '%s'."
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "Поле має співпадати з %(other_name)s."
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "Значення поля має містити не менше %(min)d символа."
+msgstr[1] "Значення поля має містити не менше ніж %(min)d символи."
+msgstr[2] "Значення поля має містити не менше ніж %(min)d символів."
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "Значення поля має містити не більше %(max)d символа."
+msgstr[1] "Значення поля має містити не більше ніж %(max)d символи."
+msgstr[2] "Значення поля має містити не більше ніж %(max)d символів."
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+msgstr[2] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "Значення поля має містити від %(min)d до %(max)d символів."
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "Число має бути щонайменше %(min)s."
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "Число має бути щонайбільше %(max)s."
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "Число має бути між %(min)s та %(max)s."
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "Це поле є обов'язковим."
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "Введено невірно."
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "Невірна електронна адреса."
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "Невірна IP адреса."
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "Невірна Mac адреса."
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "Невірний URL."
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "Невірний UUID."
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "Значення невірне, має бути одним з: %(values)s."
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "Значення невірне, не може бути одним з: %(values)s."
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "Це поле не можна редагувати."
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "Невірний CSRF токен."
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF токен відсутній"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "Помилка CSRF"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF токен прострочений"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "Недійсний варіант: перетворення неможливе"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "Недійсний варіант"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "Недійсний варіант: одне чи більше значень неможливо перетворити"
+
+#: src/wtforms/fields/choices.py:204
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' не є дійсним варіантом для цього поля."
+msgstr[1] "'%(value)s' не є дійсними варіантами для цього поля."
+msgstr[2] "'%(value)s' не є дійсними варіантами для цього поля."
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "Недійсне значення дати/часу"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "Не дійсне значення дати"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "Не дійсне значення часу"
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "Недійсне значення тижня."
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "Недійсне ціле число"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "Не є дійсним десятичним числом"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "Недійсне десятичне дробове число"
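The Ukrainian catalog declares three plural forms. A small sketch transcribing the Plural-Forms expression from the header above into Python, to show which msgstr index a given count selects:

def uk_plural_index(n: int) -> int:
    """Python transcription of the C expression in the uk Plural-Forms header."""
    if n % 10 == 1 and n % 100 != 11:
        return 0  # 1, 21, 31, ... -> msgstr[0]
    if 2 <= n % 10 <= 4 and (n % 100 < 10 or n % 100 >= 20):
        return 1  # 2-4, 22-24, ... -> msgstr[1]
    return 2      # 0, 5-20, 25-30, ... -> msgstr[2]

for n in (1, 2, 5, 11, 21, 22, 25):
    print(n, uk_plural_index(n))  # -> 0, 1, 2, 2, 0, 1, 2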
diff --git a/venv/Lib/site-packages/wtforms/locale/wtforms.pot b/venv/Lib/site-packages/wtforms/locale/wtforms.pot
new file mode 100644
index 0000000000000000000000000000000000000000..9bdf80f22da796bbb9a8edbc06fb15d8770067c8
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/wtforms.pot
@@ -0,0 +1,182 @@
+# Translations template for WTForms.
+# Copyright (C) 2023 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 3.0.1\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr ""
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr ""
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr ""
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr ""
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr ""
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr ""
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr ""
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr ""
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr ""
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr ""
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr ""
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr ""
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr ""
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr ""
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr ""
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr ""
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr ""
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr ""
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr ""
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr ""
+
+#: src/wtforms/fields/choices.py:204
+#, python-format
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] ""
+msgstr[1] ""
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr ""
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr ""
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr ""
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr ""
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr ""
diff --git a/venv/Lib/site-packages/wtforms/locale/zh/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/zh/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..f407328eb1baa1dde3830c92697614bfe95bb916
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/zh/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/zh/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/zh/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..792ac02f48ce05f5ac732cf03e7e594423fbbaf4
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/zh/LC_MESSAGES/wtforms.po
@@ -0,0 +1,188 @@
+# Chinese translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.3\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2012-01-31 13:03-0700\n"
+"Last-Translator: Grey Li <withlihui@gmail.com>\n"
+"Language-Team: zh_CN <james+i18n@simplecodes.com>\n"
+"Language: zh\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "'%s' 是无效的字段名。"
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "字段必须和 %(other_name)s 相等。"
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "字段长度必须至少 %(min)d 个字符。"
+msgstr[1] "字段长度必须至少 %(min)d 个字符。"
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "字段长度不能超过 %(max)d 个字符。"
+msgstr[1] "字段长度不能超过 %(max)d 个字符。"
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "字段长度必须为 %(max)d 个字符。"
+msgstr[1] "字段长度必须为 %(max)d 个字符。"
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "字段长度必须介于 %(min)d 到 %(max)d 个字符之间。"
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "数值必须大于 %(min)s。"
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "数值必须小于 %(max)s。"
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "数值大小必须介于 %(min)s 到 %(max)s 之间。"
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "该字段是必填字段。"
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "无效的输入。"
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "无效的 Email 地址。"
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "无效的 IP 地址。"
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "无效的 MAC 地址。"
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "无效的 URL。"
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "无效的 UUID。"
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "无效的值,必须是下列之一: %(values)s。"
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "无效的值,不能是下列任何一个: %(values)s。"
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "该字段不可编辑。"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "无效的 CSRF 验证令牌。"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "缺失 CSRF 验证令牌。"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF 验证失败。"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF 验证令牌过期。"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "选择无效:无法转化类型。"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "选择不能是空值。"
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "不是有效的选择。"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "选择无效:至少一个数据输入无法被转化类型。"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "“%(value)s” 对该字段而言是无效选项。"
+msgstr[1] "“%(value)s” 对该字段而言是无效选项。"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "不是有效的日期与时间值。"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "不是有效的日期值。"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "不是有效的时间值。"
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "不是有效的周值。"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "不是有效的整数。"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "不是有效的小数。"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "不是有效的浮点数。"
diff --git a/venv/Lib/site-packages/wtforms/locale/zh_TW/LC_MESSAGES/wtforms.mo b/venv/Lib/site-packages/wtforms/locale/zh_TW/LC_MESSAGES/wtforms.mo
new file mode 100644
index 0000000000000000000000000000000000000000..09e7f308bbe6540cdb9f4c93aed5be285aa10c38
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/locale/zh_TW/LC_MESSAGES/wtforms.mo differ
diff --git a/venv/Lib/site-packages/wtforms/locale/zh_TW/LC_MESSAGES/wtforms.po b/venv/Lib/site-packages/wtforms/locale/zh_TW/LC_MESSAGES/wtforms.po
new file mode 100644
index 0000000000000000000000000000000000000000..8e918a7023a8605b552b9a4ae8d6f3b9e5952b13
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/locale/zh_TW/LC_MESSAGES/wtforms.po
@@ -0,0 +1,184 @@
+# Chinese (Traditional, Taiwan) translations for WTForms.
+# Copyright (C) 2020 WTForms Team
+# This file is distributed under the same license as the WTForms project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: WTForms 1.0.3\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2023-10-05 13:42+0200\n"
+"PO-Revision-Date: 2013-04-14 00:26+0800\n"
+"Last-Translator: Grey Li <withlihui@gmail.com>\n"
+"Language-Team: zh_TW <ron@hng.tw>\n"
+"Language: zh_TW\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0\n"
+"Generated-By: Babel 2.8.0\n"
+
+#: src/wtforms/validators.py:86
+#, python-format
+msgid "Invalid field name '%s'."
+msgstr "'%s' 是無效的欄位名。"
+
+#: src/wtforms/validators.py:99
+#, python-format
+msgid "Field must be equal to %(other_name)s."
+msgstr "欄位必須與 %(other_name)s 相同。"
+
+#: src/wtforms/validators.py:145
+#, python-format
+msgid "Field must be at least %(min)d character long."
+msgid_plural "Field must be at least %(min)d characters long."
+msgstr[0] "欄位必須超過 %(min)d 個字元。"
+
+#: src/wtforms/validators.py:151
+#, python-format
+msgid "Field cannot be longer than %(max)d character."
+msgid_plural "Field cannot be longer than %(max)d characters."
+msgstr[0] "欄位必須少於 %(max)d 個字元。"
+
+#: src/wtforms/validators.py:157
+#, python-format
+msgid "Field must be exactly %(max)d character long."
+msgid_plural "Field must be exactly %(max)d characters long."
+msgstr[0] "欄位必須為 %(max)d 個字元。"
+
+#: src/wtforms/validators.py:163
+#, python-format
+msgid "Field must be between %(min)d and %(max)d characters long."
+msgstr "欄位必須介於 %(min)d 至 %(max)d 個字元。"
+
+#: src/wtforms/validators.py:216
+#, python-format
+msgid "Number must be at least %(min)s."
+msgstr "數字必須大於 %(min)s。"
+
+#: src/wtforms/validators.py:219
+#, python-format
+msgid "Number must be at most %(max)s."
+msgstr "數字必須小於 %(max)s。"
+
+#: src/wtforms/validators.py:222
+#, python-format
+msgid "Number must be between %(min)s and %(max)s."
+msgstr "數字必須介於 %(min)s 至 %(max)s 之間。"
+
+#: src/wtforms/validators.py:293 src/wtforms/validators.py:323
+msgid "This field is required."
+msgstr "此欄位為必填。"
+
+#: src/wtforms/validators.py:358
+msgid "Invalid input."
+msgstr "無效的輸入。"
+
+#: src/wtforms/validators.py:422
+msgid "Invalid email address."
+msgstr "無效的電子郵件地址。"
+
+#: src/wtforms/validators.py:460
+msgid "Invalid IP address."
+msgstr "無效的 IP 位址。"
+
+#: src/wtforms/validators.py:503
+msgid "Invalid Mac address."
+msgstr "無效的 MAC 位址。"
+
+#: src/wtforms/validators.py:540
+msgid "Invalid URL."
+msgstr "無效的 URL。"
+
+#: src/wtforms/validators.py:561
+msgid "Invalid UUID."
+msgstr "無效的 UUID。"
+
+#: src/wtforms/validators.py:594
+#, python-format
+msgid "Invalid value, must be one of: %(values)s."
+msgstr "無效的資料,必須為以下任一:%(values)s。"
+
+#: src/wtforms/validators.py:629
+#, python-format
+msgid "Invalid value, can't be any of: %(values)s."
+msgstr "無效的資料,不得為以下任一:%(values)s。"
+
+#: src/wtforms/validators.py:698
+msgid "This field cannot be edited"
+msgstr "此欄位不可編輯。"
+
+#: src/wtforms/validators.py:714
+msgid "This field is disabled and cannot have a value"
+msgstr ""
+
+#: src/wtforms/csrf/core.py:96
+msgid "Invalid CSRF Token."
+msgstr "無效的 CSRF 憑證。"
+
+#: src/wtforms/csrf/session.py:63
+msgid "CSRF token missing."
+msgstr "CSRF 憑證不存在。"
+
+#: src/wtforms/csrf/session.py:71
+msgid "CSRF failed."
+msgstr "CSRF 驗證失敗。"
+
+#: src/wtforms/csrf/session.py:76
+msgid "CSRF token expired."
+msgstr "CSRF 憑證過期。"
+
+#: src/wtforms/fields/choices.py:135
+msgid "Invalid Choice: could not coerce."
+msgstr "無效的選擇:無法強制轉化。"
+
+#: src/wtforms/fields/choices.py:139 src/wtforms/fields/choices.py:192
+msgid "Choices cannot be None."
+msgstr "選擇不能為空值。"
+
+#: src/wtforms/fields/choices.py:148
+msgid "Not a valid choice."
+msgstr "不是有效的選擇。"
+
+#: src/wtforms/fields/choices.py:185
+msgid "Invalid choice(s): one or more data inputs could not be coerced."
+msgstr "無效的選擇:至少有一筆資料無法被強制轉化。"
+
+#: src/wtforms/fields/choices.py:204
+#, fuzzy, python-format
+#| msgid "'%(value)s' is not a valid choice for this field."
+msgid "'%(value)s' is not a valid choice for this field."
+msgid_plural "'%(value)s' are not valid choices for this field."
+msgstr[0] "'%(value)s' 對此欄位為無效的選項。"
+
+#: src/wtforms/fields/datetime.py:51
+msgid "Not a valid datetime value."
+msgstr "不是有效的日期與時間。"
+
+#: src/wtforms/fields/datetime.py:77
+msgid "Not a valid date value."
+msgstr "不是有效的日期。"
+
+#: src/wtforms/fields/datetime.py:103
+msgid "Not a valid time value."
+msgstr "不是有效的時間。"
+
+#: src/wtforms/fields/datetime.py:148
+msgid "Not a valid week value."
+msgstr "不是有效的週值。"
+
+#: src/wtforms/fields/numeric.py:82 src/wtforms/fields/numeric.py:92
+msgid "Not a valid integer value."
+msgstr "不是有效的整數值。"
+
+#: src/wtforms/fields/numeric.py:168
+msgid "Not a valid decimal value."
+msgstr "不是有效的十進位數值。"
+
+#: src/wtforms/fields/numeric.py:197
+msgid "Not a valid float value."
+msgstr "不是有效的浮點數值。"
diff --git a/venv/Lib/site-packages/wtforms/meta.py b/venv/Lib/site-packages/wtforms/meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..710ab58be9e5a0f769e4930cf203d3c7a916b6da
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/meta.py
@@ -0,0 +1,132 @@
+from wtforms import i18n
+from wtforms.utils import WebobInputWrapper
+from wtforms.widgets.core import clean_key
+
+
+class DefaultMeta:
+    """
+    This is the default Meta class which defines all the default values and
+    therefore also the 'API' of the class Meta interface.
+    """
+
+    # -- Basic form primitives
+
+    def bind_field(self, form, unbound_field, options):
+        """
+        bind_field allows potential customization of how fields are bound.
+
+        The default implementation simply passes the options to
+        :meth:`UnboundField.bind`.
+
+        :param form: The form.
+        :param unbound_field: The unbound field.
+        :param options:
+            A dictionary of options which are typically passed to the field.
+
+        :return: A bound field
+        """
+        return unbound_field.bind(form=form, **options)
+
+    def wrap_formdata(self, form, formdata):
+        """
+        wrap_formdata allows custom wrapping of WTForms formdata.
+
+        The default implementation detects webob-style multidicts and wraps
+        them, otherwise passes formdata back unchanged.
+
+        :param form: The form.
+        :param formdata: Form data.
+        :return: A form-input wrapper compatible with WTForms.
+        """
+        if formdata is not None and not hasattr(formdata, "getlist"):
+            if hasattr(formdata, "getall"):
+                return WebobInputWrapper(formdata)
+            else:
+                raise TypeError(
+                    "formdata should be a multidict-type wrapper that"
+                    " supports the 'getlist' method"
+                )
+        return formdata
+
+    def render_field(self, field, render_kw):
+        """
+        render_field allows customization of how widget rendering is done.
+
+        The default implementation calls ``field.widget(field, **render_kw)``
+        """
+
+        render_kw = {clean_key(k): v for k, v in render_kw.items()}
+
+        other_kw = getattr(field, "render_kw", None)
+        if other_kw is not None:
+            other_kw = {clean_key(k): v for k, v in other_kw.items()}
+            render_kw = dict(other_kw, **render_kw)
+        return field.widget(field, **render_kw)
+
+    # -- CSRF
+
+    csrf = False
+    csrf_field_name = "csrf_token"
+    csrf_secret = None
+    csrf_context = None
+    csrf_class = None
+
+    def build_csrf(self, form):
+        """
+        Build a CSRF implementation. This is called once per form instance.
+
+        The default implementation builds the class referenced to by
+        :attr:`csrf_class` with zero arguments. If `csrf_class` is ``None``,
+        will instead use the default implementation
+        :class:`wtforms.csrf.session.SessionCSRF`.
+
+        :param form: The form.
+        :return: A CSRF implementation.
+        """
+        if self.csrf_class is not None:
+            return self.csrf_class()
+
+        from wtforms.csrf.session import SessionCSRF
+
+        return SessionCSRF()
+
+    # -- i18n
+
+    locales = False
+    cache_translations = True
+    translations_cache = {}
+
+    def get_translations(self, form):
+        """
+        Override in subclasses to provide alternate translations factory.
+        See the i18n documentation for more.
+
+        :param form: The form.
+        :return: An object that provides gettext() and ngettext() methods.
+        """
+        locales = self.locales
+        if locales is False:
+            return None
+
+        if self.cache_translations:
+            # Make locales be a hashable value
+            locales = tuple(locales) if locales else None
+
+            translations = self.translations_cache.get(locales)
+            if translations is None:
+                translations = self.translations_cache[locales] = i18n.get_translations(
+                    locales
+                )
+
+            return translations
+
+        return i18n.get_translations(locales)
+
+    # -- General
+
+    def update_values(self, values):
+        """
+        Given a dictionary of values, update values on this `Meta` instance.
+        """
+        for key, value in values.items():
+            setattr(self, key, value)
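DefaultMeta's attributes are exactly what a form's inner `class Meta` overrides. A minimal sketch of wiring up CSRF and locales through this mechanism (the secret and locale values are placeholder assumptions, not from this repo):

from wtforms import Form, StringField
from wtforms.csrf.session import SessionCSRF

class ContactForm(Form):
    class Meta:
        csrf = True                       # DefaultMeta has csrf = False
        csrf_class = SessionCSRF          # also the fallback in build_csrf()
        csrf_secret = b"placeholder-key"  # required by SessionCSRF; use a real secret
        locales = ("sv_SE", "sv")         # consumed by get_translations()

    name = StringField("Name")

# Per-instance values flow through Meta.update_values(), e.g. in a Flask view:
# form = ContactForm(request.form, meta={"csrf_context": session})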
diff --git a/venv/Lib/site-packages/wtforms/utils.py b/venv/Lib/site-packages/wtforms/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4911afa7d70bf8797b0584c1398824c90fd04c37
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/utils.py
@@ -0,0 +1,89 @@
+import re
+
+
+# https://docs.python.org/3/library/datetime.html#technical-detail (see NOTE #9)
+_DATETIME_STRIP_ZERO_PADDING_FORMATS_RE = re.compile(
+    "%-["
+    "d"  # day of month
+    "m"  # month
+    "H"  # hour (24-hour)
+    "I"  # hour (12-hour)
+    "M"  # minutes
+    "S"  # seconds
+    "U"  # week of year (Sunday first day of week)
+    "W"  # week of year (Monday first day of week)
+    "V"  # week of year (ISO 8601)
+    "]",
+    re.MULTILINE,
+)
+
+
+def clean_datetime_format_for_strptime(formats):
+    """
+    Remove dashes used to disable zero-padding with strftime formats (for
+    compatibility with strptime).
+    """
+    return [
+        re.sub(
+            _DATETIME_STRIP_ZERO_PADDING_FORMATS_RE,
+            lambda m: m[0].replace("-", ""),
+            format,
+        )
+        for format in formats
+    ]
+
+
+class UnsetValue:
+    """
+    An unset value.
+
+    This is used in situations where a blank value like `None` is acceptable,
+    usually as the default value of a class variable or function parameter
+    (in other words, when `None` is itself a valid value).
+    """
+
+    def __str__(self):
+        return "<unset value>"
+
+    def __repr__(self):
+        return "<unset value>"
+
+    def __bool__(self):
+        return False
+
+    def __nonzero__(self):
+        return False
+
+
+unset_value = UnsetValue()
+
+
+class WebobInputWrapper:
+    """
+    Wrap a webob MultiDict so it can be passed as `formdata` to a Field.
+
+    For consistency, WTForms supports as form input a small subset of the API
+    shared by cgi.FieldStorage, Django's QueryDict, and Werkzeug's MultiDict.
+    Webob's multidict is the only supported one that does not fit this API,
+    but it is used by many frameworks, so it is wrapped here.
+
+    While we could write a full wrapper to support all the methods, this will
+    undoubtedly result in bugs due to some subtle differences between the
+    various wrappers. So we will keep it simple.
+    """
+
+    def __init__(self, multidict):
+        self._wrapped = multidict
+
+    def __iter__(self):
+        return iter(self._wrapped)
+
+    def __len__(self):
+        return len(self._wrapped)
+
+    def __contains__(self, name):
+        return name in self._wrapped
+
+    def getlist(self, name):
+        return self._wrapped.getall(name)
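A quick illustration of `clean_datetime_format_for_strptime` as defined above: the `%-d`-style codes disable zero-padding for strftime on some platforms, but strptime rejects them, so the dashes are stripped.

from wtforms.utils import clean_datetime_format_for_strptime

print(clean_datetime_format_for_strptime(["%-d.%-m.%Y %H:%M"]))
# -> ['%d.%m.%Y %H:%M']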
diff --git a/venv/Lib/site-packages/wtforms/validators.py b/venv/Lib/site-packages/wtforms/validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..59134f20fe1463d37f9c13576edb9ea8efb34fb7
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/validators.py
@@ -0,0 +1,732 @@
+import ipaddress
+import math
+import re
+import uuid
+
+__all__ = (
+    "DataRequired",
+    "data_required",
+    "Email",
+    "email",
+    "EqualTo",
+    "equal_to",
+    "IPAddress",
+    "ip_address",
+    "InputRequired",
+    "input_required",
+    "Length",
+    "length",
+    "NumberRange",
+    "number_range",
+    "Optional",
+    "optional",
+    "Regexp",
+    "regexp",
+    "URL",
+    "url",
+    "AnyOf",
+    "any_of",
+    "NoneOf",
+    "none_of",
+    "MacAddress",
+    "mac_address",
+    "UUID",
+    "ValidationError",
+    "StopValidation",
+    "readonly",
+    "ReadOnly",
+    "disabled",
+    "Disabled",
+)
+
+
+class ValidationError(ValueError):
+    """
+    Raised when a validator fails to validate its input.
+    """
+
+    def __init__(self, message="", *args, **kwargs):
+        ValueError.__init__(self, message, *args, **kwargs)
+
+
+class StopValidation(Exception):
+    """
+    Causes the validation chain to stop.
+
+    If StopValidation is raised, no more validators in the validation chain are
+    called. If raised with a message, the message will be added to the errors
+    list.
+    """
+
+    def __init__(self, message="", *args, **kwargs):
+        Exception.__init__(self, message, *args, **kwargs)
+
+
+class EqualTo:
+    """
+    Compares the values of two fields.
+
+    :param fieldname:
+        The name of the other field to compare to.
+    :param message:
+        Error message to raise in case of a validation error. Can be
+        interpolated with `%(other_label)s` and `%(other_name)s` to provide a
+        more helpful error.
+    """
+
+    def __init__(self, fieldname, message=None):
+        self.fieldname = fieldname
+        self.message = message
+
+    def __call__(self, form, field):
+        try:
+            other = form[self.fieldname]
+        except KeyError as exc:
+            raise ValidationError(
+                field.gettext("Invalid field name '%s'.") % self.fieldname
+            ) from exc
+        if field.data == other.data:
+            return
+
+        d = {
+            "other_label": hasattr(other, "label")
+            and other.label.text
+            or self.fieldname,
+            "other_name": self.fieldname,
+        }
+        message = self.message
+        if message is None:
+            message = field.gettext("Field must be equal to %(other_name)s.")
+
+        raise ValidationError(message % d)
+
+
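+# Editorial sketch, not part of the upstream file: typical EqualTo usage for a
+# password-confirmation pair. The form and field names are illustrative.
+#
+#   from wtforms import Form, PasswordField
+#   from wtforms.validators import EqualTo, InputRequired
+#
+#   class ChangePasswordForm(Form):
+#       password = PasswordField("New password", validators=[InputRequired()])
+#       confirm = PasswordField(
+#           "Repeat password",
+#           validators=[EqualTo("password", message="Passwords must match.")],
+#       )
+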
+class Length:
+    """
+    Validates the length of a string.
+
+    :param min:
+        The minimum required length of the string. If not provided, minimum
+        length will not be checked.
+    :param max:
+        The maximum length of the string. If not provided, maximum length
+        will not be checked.
+    :param message:
+        Error message to raise in case of a validation error. Can be
+        interpolated using `%(min)d` and `%(max)d` if desired. Useful defaults
+        are provided depending on the existence of min and max.
+
+    When supported, sets the `minlength` and `maxlength` attributes on widgets.
+    """
+
+    def __init__(self, min=-1, max=-1, message=None):
+        assert (
+            min != -1 or max != -1
+        ), "At least one of `min` or `max` must be specified."
+        assert max == -1 or min <= max, "`min` cannot be more than `max`."
+        self.min = min
+        self.max = max
+        self.message = message
+        self.field_flags = {}
+        if self.min != -1:
+            self.field_flags["minlength"] = self.min
+        if self.max != -1:
+            self.field_flags["maxlength"] = self.max
+
+    def __call__(self, form, field):
+        length = field.data and len(field.data) or 0
+        if length >= self.min and (self.max == -1 or length <= self.max):
+            return
+
+        if self.message is not None:
+            message = self.message
+
+        elif self.max == -1:
+            message = field.ngettext(
+                "Field must be at least %(min)d character long.",
+                "Field must be at least %(min)d characters long.",
+                self.min,
+            )
+        elif self.min == -1:
+            message = field.ngettext(
+                "Field cannot be longer than %(max)d character.",
+                "Field cannot be longer than %(max)d characters.",
+                self.max,
+            )
+        elif self.min == self.max:
+            message = field.ngettext(
+                "Field must be exactly %(max)d character long.",
+                "Field must be exactly %(max)d characters long.",
+                self.max,
+            )
+        else:
+            message = field.gettext(
+                "Field must be between %(min)d and %(max)d characters long."
+            )
+
+        raise ValidationError(message % dict(min=self.min, max=self.max, length=length))
+
+
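+# Editorial sketch, not part of the upstream file: Length(min=3, max=100) also
+# sets the minlength/maxlength field flags built in __init__ above, which
+# widgets can render as HTML attributes. Names are illustrative.
+#
+#   from wtforms import Form, StringField
+#   from wtforms.validators import Length
+#
+#   class TitleForm(Form):
+#       title = StringField("Title", validators=[Length(min=3, max=100)])
+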
+class NumberRange:
+    """
+    Validates that a number is of a minimum and/or maximum value, inclusive.
+    This will work with any comparable number type, such as floats and
+    decimals, not just integers.
+
+    :param min:
+        The minimum required value of the number. If not provided, minimum
+        value will not be checked.
+    :param max:
+        The maximum value of the number. If not provided, maximum value
+        will not be checked.
+    :param message:
+        Error message to raise in case of a validation error. Can be
+        interpolated using `%(min)s` and `%(max)s` if desired. Useful defaults
+        are provided depending on the existence of min and max.
+
+    When supported, sets the `min` and `max` attributes on widgets.
+    """
+
+    def __init__(self, min=None, max=None, message=None):
+        self.min = min
+        self.max = max
+        self.message = message
+        self.field_flags = {}
+        if self.min is not None:
+            self.field_flags["min"] = self.min
+        if self.max is not None:
+            self.field_flags["max"] = self.max
+
+    def __call__(self, form, field):
+        data = field.data
+        if (
+            data is not None
+            and not math.isnan(data)
+            and (self.min is None or data >= self.min)
+            and (self.max is None or data <= self.max)
+        ):
+            return
+
+        if self.message is not None:
+            message = self.message
+
+        # we use %(min)s interpolation to support floats, None, and
+        # Decimals without throwing a formatting exception.
+        elif self.max is None:
+            message = field.gettext("Number must be at least %(min)s.")
+
+        elif self.min is None:
+            message = field.gettext("Number must be at most %(max)s.")
+
+        else:
+            message = field.gettext("Number must be between %(min)s and %(max)s.")
+
+        raise ValidationError(message % dict(min=self.min, max=self.max))
+
+
+class Optional:
+    """
+    Allows empty input and stops the validation chain from continuing.
+
+    If input is empty, also removes prior errors (such as processing errors)
+    from the field.
+
+    :param strip_whitespace:
+        If True (the default) also stop the validation chain on input which
+        consists of only whitespace.
+
+    Sets the `optional` attribute on widgets.
+    """
+
+    def __init__(self, strip_whitespace=True):
+        if strip_whitespace:
+            self.string_check = lambda s: s.strip()
+        else:
+            self.string_check = lambda s: s
+
+        self.field_flags = {"optional": True}
+
+    def __call__(self, form, field):
+        if (
+            not field.raw_data
+            or isinstance(field.raw_data[0], str)
+            and not self.string_check(field.raw_data[0])
+        ):
+            field.errors[:] = []
+            raise StopValidation()
+
+
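+# Editorial sketch, not part of the upstream file: Optional() raises
+# StopValidation on empty input, so later validators are skipped. A blank
+# value passes; a non-blank one must still satisfy Email(). Names are
+# illustrative.
+#
+#   from wtforms import Form, StringField
+#   from wtforms.validators import Email, Optional
+#
+#   class ProfileForm(Form):
+#       backup_email = StringField(
+#           "Backup email", validators=[Optional(), Email()]
+#       )
+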
+class DataRequired:
+    """
+    Checks that the field's data is 'truthy', otherwise stops the validation chain.
+
+    This validator checks that the ``data`` attribute on the field is a 'true'
+    value (effectively, it does ``if field.data``.) Furthermore, if the data
+    is a string type, a string containing only whitespace characters is
+    considered false.
+
+    If the data is empty, also removes prior errors (such as processing errors)
+    from the field.
+
+    **NOTE** this validator used to be called `Required` but the way it behaved
+    (requiring coerced data, not input data) meant it functioned in a way
+    which was not symmetric to the `Optional` validator and furthermore caused
+    confusion with certain fields which coerced data to 'falsey' values like
+    ``0``, ``Decimal(0)``, ``time(0)`` etc. Unless a very specific reason
+    exists, we recommend using the :class:`InputRequired` instead.
+
+    :param message:
+        Error message to raise in case of a validation error.
+
+    Sets the `required` attribute on widgets.
+    """
+
+    def __init__(self, message=None):
+        self.message = message
+        self.field_flags = {"required": True}
+
+    def __call__(self, form, field):
+        if field.data and (not isinstance(field.data, str) or field.data.strip()):
+            return
+
+        if self.message is None:
+            message = field.gettext("This field is required.")
+        else:
+            message = self.message
+
+        field.errors[:] = []
+        raise StopValidation(message)
+
+
+class InputRequired:
+    """
+    Validates that input was provided for this field.
+
+    Note there is a distinction between this and DataRequired in that
+    InputRequired checks that form-input data was provided, and DataRequired
+    looks at the post-coercion data. This means that this validator only checks
+    whether non-empty data was sent, not whether non-empty data was coerced
+    from that data. Initially populated data is not considered sent.
+
+    Sets the `required` attribute on widgets.
+    """
+
+    def __init__(self, message=None):
+        self.message = message
+        self.field_flags = {"required": True}
+
+    def __call__(self, form, field):
+        if field.raw_data and field.raw_data[0]:
+            return
+
+        if self.message is None:
+            message = field.gettext("This field is required.")
+        else:
+            message = self.message
+
+        field.errors[:] = []
+        raise StopValidation(message)
+
+
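+# Editorial note, not part of the upstream file: the practical difference
+# between the two validators above. For an IntegerField receiving the raw
+# input "0", InputRequired passes (input was sent) while DataRequired fails
+# (the coerced value 0 is falsy), which is why the DataRequired docstring
+# recommends InputRequired.
+#
+#   from werkzeug.datastructures import MultiDict
+#   from wtforms import Form, IntegerField
+#   from wtforms.validators import DataRequired
+#
+#   class F(Form):
+#       n = IntegerField(validators=[DataRequired()])
+#
+#   F(MultiDict({"n": "0"})).validate()  # False; InputRequired would pass
+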
+class Regexp:
+    """
+    Validates the field against a user provided regexp.
+
+    :param regex:
+        The regular expression string to use. Can also be a compiled regular
+        expression pattern.
+    :param flags:
+        The regexp flags to use, for example re.IGNORECASE. Ignored if
+        `regex` is not a string.
+    :param message:
+        Error message to raise in case of a validation error.
+    """
+
+    def __init__(self, regex, flags=0, message=None):
+        if isinstance(regex, str):
+            regex = re.compile(regex, flags)
+        self.regex = regex
+        self.message = message
+
+    def __call__(self, form, field, message=None):
+        match = self.regex.match(field.data or "")
+        if match:
+            return match
+
+        if message is None:
+            if self.message is None:
+                message = field.gettext("Invalid input.")
+            else:
+                message = self.message
+
+        raise ValidationError(message)
+
+
+class Email:
+    """
+    Validates an email address. Requires the email_validator package to be
+    installed, e.g. ``pip install wtforms[email]``.
+
+    :param message:
+        Error message to raise in case of a validation error.
+    :param granular_message:
+        Use validation failed message from email_validator library
+        (Default False).
+    :param check_deliverability:
+        Perform domain name resolution check (Default False).
+    :param allow_smtputf8:
+        Fail validation for addresses that would require SMTPUTF8
+        (Default True).
+    :param allow_empty_local:
+        Allow an empty local part (i.e. @example.com), e.g. for validating
+        Postfix aliases (Default False).
+    """
+
+    def __init__(
+        self,
+        message=None,
+        granular_message=False,
+        check_deliverability=False,
+        allow_smtputf8=True,
+        allow_empty_local=False,
+    ):
+        self.message = message
+        self.granular_message = granular_message
+        self.check_deliverability = check_deliverability
+        self.allow_smtputf8 = allow_smtputf8
+        self.allow_empty_local = allow_empty_local
+
+    def __call__(self, form, field):
+        try:
+            import email_validator
+        except ImportError as exc:  # pragma: no cover
+            raise Exception(
+                "Install 'email_validator' for email validation support."
+            ) from exc
+
+        try:
+            if field.data is None:
+                raise email_validator.EmailNotValidError()
+            email_validator.validate_email(
+                field.data,
+                check_deliverability=self.check_deliverability,
+                allow_smtputf8=self.allow_smtputf8,
+                allow_empty_local=self.allow_empty_local,
+            )
+        except email_validator.EmailNotValidError as e:
+            message = self.message
+            if message is None:
+                if self.granular_message:
+                    message = field.gettext(e)
+                else:
+                    message = field.gettext("Invalid email address.")
+            raise ValidationError(message) from e
+
+
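+# Editorial sketch, not part of the upstream file: Email() imports the
+# optional email_validator package lazily, as shown above. Names are
+# illustrative.
+#
+#   from wtforms import Form, StringField
+#   from wtforms.validators import Email
+#
+#   class SubscribeForm(Form):
+#       email = StringField("Email", validators=[Email(granular_message=True)])
+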
+class IPAddress:
+    """
+    Validates an IP address.
+
+    :param ipv4:
+        If True, accept IPv4 addresses as valid (default True)
+    :param ipv6:
+        If True, accept IPv6 addresses as valid (default False)
+    :param message:
+        Error message to raise in case of a validation error.
+    """
+
+    def __init__(self, ipv4=True, ipv6=False, message=None):
+        if not ipv4 and not ipv6:
+            raise ValueError(
+                "IP Address Validator must have at least one of ipv4 or ipv6 enabled."
+            )
+        self.ipv4 = ipv4
+        self.ipv6 = ipv6
+        self.message = message
+
+    def __call__(self, form, field):
+        value = field.data
+        valid = False
+        if value:
+            valid = (self.ipv4 and self.check_ipv4(value)) or (
+                self.ipv6 and self.check_ipv6(value)
+            )
+
+        if valid:
+            return
+
+        message = self.message
+        if message is None:
+            message = field.gettext("Invalid IP address.")
+        raise ValidationError(message)
+
+    @classmethod
+    def check_ipv4(cls, value):
+        try:
+            address = ipaddress.ip_address(value)
+        except ValueError:
+            return False
+
+        if not isinstance(address, ipaddress.IPv4Address):
+            return False
+
+        return True
+
+    @classmethod
+    def check_ipv6(cls, value):
+        try:
+            address = ipaddress.ip_address(value)
+        except ValueError:
+            return False
+
+        if not isinstance(address, ipaddress.IPv6Address):
+            return False
+
+        return True
+
+
+class MacAddress(Regexp):
+    """
+    Validates a MAC address.
+
+    :param message:
+        Error message to raise in case of a validation error.
+    """
+
+    def __init__(self, message=None):
+        pattern = r"^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"
+        super().__init__(pattern, message=message)
+
+    def __call__(self, form, field):
+        message = self.message
+        if message is None:
+            message = field.gettext("Invalid Mac address.")
+
+        super().__call__(form, field, message)
+
+
+class URL(Regexp):
+    """
+    Simple regexp-based URL validation. Much like the email validator, you
+    probably want to validate the URL later by other means if the URL must
+    resolve.
+
+    :param require_tld:
+        If true, then the domain-name portion of the URL must contain a .tld
+        suffix.  Set this to false if you want to allow domains like
+        `localhost`.
+    :param allow_ip:
+        If false, then an IP address given as the host will fail validation.
+    :param message:
+        Error message to raise in case of a validation error.
+    """
+
+    def __init__(self, require_tld=True, allow_ip=True, message=None):
+        regex = (
+            r"^[a-z]+://"
+            r"(?P<host>[^\/\?:]+)"
+            r"(?P<port>:[0-9]+)?"
+            r"(?P<path>\/.*?)?"
+            r"(?P<query>\?.*)?$"
+        )
+        super().__init__(regex, re.IGNORECASE, message)
+        self.validate_hostname = HostnameValidation(
+            require_tld=require_tld, allow_ip=allow_ip
+        )
+
+    def __call__(self, form, field):
+        message = self.message
+        if message is None:
+            message = field.gettext("Invalid URL.")
+
+        match = super().__call__(form, field, message)
+        if not self.validate_hostname(match.group("host")):
+            raise ValidationError(message)
+
+
+class UUID:
+    """
+    Validates a UUID.
+
+    :param message:
+        Error message to raise in case of a validation error.
+    """
+
+    def __init__(self, message=None):
+        self.message = message
+
+    def __call__(self, form, field):
+        message = self.message
+        if message is None:
+            message = field.gettext("Invalid UUID.")
+        try:
+            uuid.UUID(field.data)
+        except ValueError as exc:
+            raise ValidationError(message) from exc
+
+
+class AnyOf:
+    """
+    Compares the incoming data to a sequence of valid inputs.
+
+    :param values:
+        A sequence of valid inputs.
+    :param message:
+        Error message to raise in case of a validation error. `%(values)s`
+        contains the list of values.
+    :param values_formatter:
+        Function used to format the list of values in the error message.
+    """
+
+    def __init__(self, values, message=None, values_formatter=None):
+        self.values = values
+        self.message = message
+        if values_formatter is None:
+            values_formatter = self.default_values_formatter
+        self.values_formatter = values_formatter
+
+    def __call__(self, form, field):
+        if field.data in self.values:
+            return
+
+        message = self.message
+        if message is None:
+            message = field.gettext("Invalid value, must be one of: %(values)s.")
+
+        raise ValidationError(message % dict(values=self.values_formatter(self.values)))
+
+    @staticmethod
+    def default_values_formatter(values):
+        return ", ".join(str(x) for x in values)
+
+
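+# Editorial sketch, not part of the upstream file: AnyOf checks the
+# post-coercion data against a fixed set of allowed values. Names are
+# illustrative.
+#
+#   from wtforms import Form, StringField
+#   from wtforms.validators import AnyOf
+#
+#   class StatusForm(Form):
+#       status = StringField(
+#           "Status", validators=[AnyOf(["draft", "published"])]
+#       )
+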
+class NoneOf:
+    """
+    Compares the incoming data to a sequence of invalid inputs.
+
+    :param values:
+        A sequence of invalid inputs.
+    :param message:
+        Error message to raise in case of a validation error. `%(values)s`
+        contains the list of values.
+    :param values_formatter:
+        Function used to format the list of values in the error message.
+    """
+
+    def __init__(self, values, message=None, values_formatter=None):
+        self.values = values
+        self.message = message
+        if values_formatter is None:
+            values_formatter = self.default_values_formatter
+        self.values_formatter = values_formatter
+
+    def __call__(self, form, field):
+        if field.data not in self.values:
+            return
+
+        message = self.message
+        if message is None:
+            message = field.gettext("Invalid value, can't be any of: %(values)s.")
+
+        raise ValidationError(message % dict(values=self.values_formatter(self.values)))
+
+    @staticmethod
+    def default_values_formatter(v):
+        return ", ".join(str(x) for x in v)
+
+
+class HostnameValidation:
+    """
+    Helper class for checking hostnames for validation.
+
+    This is not a validator in and of itself, and as such is not exported.
+    """
+
+    hostname_part = re.compile(r"^(xn-|[a-z0-9_]+)(-[a-z0-9_-]+)*$", re.IGNORECASE)
+    tld_part = re.compile(r"^([a-z]{2,20}|xn--([a-z0-9]+-)*[a-z0-9]+)$", re.IGNORECASE)
+
+    def __init__(self, require_tld=True, allow_ip=False):
+        self.require_tld = require_tld
+        self.allow_ip = allow_ip
+
+    def __call__(self, hostname):
+        if self.allow_ip and (
+            IPAddress.check_ipv4(hostname) or IPAddress.check_ipv6(hostname)
+        ):
+            return True
+
+        # Encode out IDNA hostnames. This makes further validation easier.
+        try:
+            hostname = hostname.encode("idna")
+        except UnicodeError:
+            pass
+
+        # Turn back into a string (idna encoding yields bytes in Python 3.x)
+        if not isinstance(hostname, str):
+            hostname = hostname.decode("ascii")
+
+        if len(hostname) > 253:
+            return False
+
+        # Check that all labels in the hostname are valid
+        parts = hostname.split(".")
+        for part in parts:
+            if not part or len(part) > 63:
+                return False
+            if not self.hostname_part.match(part):
+                return False
+
+        if self.require_tld and (len(parts) < 2 or not self.tld_part.match(parts[-1])):
+            return False
+
+        return True
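+
+
+# A short sketch of the helper above, assuming direct use within this
+# package: it is called with a bare hostname string and returns a boolean.
+def _sketch_hostname_validation():  # pragma: no cover - documentation example
+    check = HostnameValidation(require_tld=True, allow_ip=True)
+    check("example.com")  # True
+    check("localhost")  # False: a TLD is required
+    check("127.0.0.1")  # True: allow_ip accepts IP literals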
+
+
+class ReadOnly:
+    """
+    Set a field readonly.
+
+    Validation fails if the form data differs from the field's object data
+    or, if that is unset, from the field's default data.
+    """
+
+    def __init__(self):
+        self.field_flags = {"readonly": True}
+
+    def __call__(self, form, field):
+        if field.data != field.object_data:
+            raise ValidationError(field.gettext("This field cannot be edited"))
+
+
+class Disabled:
+    """
+    Set a field disabled.
+
+    Validation fails if the form data has any value.
+    """
+
+    def __init__(self):
+        self.field_flags = {"disabled": True}
+
+    def __call__(self, form, field):
+        if field.raw_data is not None:
+            raise ValidationError(
+                field.gettext("This field is disabled and cannot have a value")
+            )
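+
+
+# An illustrative sketch: both validators set a field flag that widgets can
+# render as an HTML attribute, while rejecting tampered submissions on the
+# server side. The form below is hypothetical.
+def _sketch_readonly_disabled():  # pragma: no cover - documentation example
+    from wtforms import Form, StringField
+
+    class ProfileForm(Form):
+        username = StringField(validators=[ReadOnly()])
+        legacy_id = StringField(validators=[Disabled()])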
+
+
+email = Email
+equal_to = EqualTo
+ip_address = IPAddress
+mac_address = MacAddress
+length = Length
+number_range = NumberRange
+optional = Optional
+input_required = InputRequired
+data_required = DataRequired
+regexp = Regexp
+url = URL
+any_of = AnyOf
+none_of = NoneOf
+readonly = ReadOnly
+disabled = Disabled
diff --git a/venv/Lib/site-packages/wtforms/widgets/__init__.py b/venv/Lib/site-packages/wtforms/widgets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c87f108dd05d21922d67355913c72e948f3aa69b
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/widgets/__init__.py
@@ -0,0 +1,3 @@
+from wtforms.widgets.core import *
+from wtforms.widgets.core import html_params
+from wtforms.widgets.core import Input
diff --git a/venv/Lib/site-packages/wtforms/widgets/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/wtforms/widgets/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3ffc460ac797379969395b3a69246472d482f9a
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/widgets/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/widgets/__pycache__/core.cpython-311.pyc b/venv/Lib/site-packages/wtforms/widgets/__pycache__/core.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80729ce0b091bf7bd977c41b3847b3506f9341ec
Binary files /dev/null and b/venv/Lib/site-packages/wtforms/widgets/__pycache__/core.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/wtforms/widgets/core.py b/venv/Lib/site-packages/wtforms/widgets/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..62d86e94ab48d37608b83341a20f0d12cae7377c
--- /dev/null
+++ b/venv/Lib/site-packages/wtforms/widgets/core.py
@@ -0,0 +1,583 @@
+import warnings
+
+from markupsafe import escape
+from markupsafe import Markup
+
+__all__ = (
+    "CheckboxInput",
+    "ColorInput",
+    "DateInput",
+    "DateTimeInput",
+    "DateTimeLocalInput",
+    "EmailInput",
+    "FileInput",
+    "HiddenInput",
+    "ListWidget",
+    "MonthInput",
+    "NumberInput",
+    "Option",
+    "PasswordInput",
+    "RadioInput",
+    "RangeInput",
+    "SearchInput",
+    "Select",
+    "SubmitInput",
+    "TableWidget",
+    "TextArea",
+    "TextInput",
+    "TelInput",
+    "TimeInput",
+    "URLInput",
+    "WeekInput",
+)
+
+
+def clean_key(key):
+    """Strip trailing underscores and hyphenate ``data_``/``aria_`` keys."""
+    key = key.rstrip("_")
+    if key.startswith("data_") or key.startswith("aria_"):
+        key = key.replace("_", "-")
+    return key
+
+
+def html_params(**kwargs):
+    """
+    Generate HTML attribute syntax from the given keyword arguments.
+
+    The output value is sorted by the passed keys, to provide consistent output
+    each time this function is called with the same parameters. Because of the
+    frequent use of the normally reserved keywords `class` and `for`, suffixing
+    these with an underscore will allow them to be used.
+
+    In order to facilitate the use of ``data-`` and ``aria-`` attributes, if the
+    name of the attribute begins with ``data_`` or ``aria_``, then every
+    underscore will be replaced with a hyphen in the generated attribute.
+
+    >>> html_params(data_attr='user.name', aria_labelledby='name')
+    'aria-labelledby="name" data-attr="user.name"'
+
+    In addition, the values ``True`` and ``False`` are special:
+      * ``attr=True`` generates the HTML compact output of a boolean attribute,
+        e.g. ``checked=True`` will generate simply ``checked``
+      * ``attr=False`` will be ignored and generate no output.
+
+    >>> html_params(name='text1', id='f', class_='text')
+    'class="text" id="f" name="text1"'
+    >>> html_params(checked=True, readonly=False, name="text1", abc="hello")
+    'abc="hello" checked name="text1"'
+
+    .. versionchanged:: 3.0
+        ``aria_`` args convert underscores to hyphens like ``data_``
+        args.
+
+    .. versionchanged:: 2.2
+        ``data_`` args convert all underscores to hyphens, instead of
+        only the first one.
+    """
+    params = []
+    for k, v in sorted(kwargs.items()):
+        k = clean_key(k)
+        if v is True:
+            params.append(k)
+        elif v is False:
+            pass
+        else:
+            params.append(f'{str(k)}="{escape(v)}"')  # noqa: B907
+    return " ".join(params)
+
+
+class ListWidget:
+    """
+    Renders a list of fields as a `ul` or `ol` list.
+
+    This is used for fields which encapsulate many inner fields as subfields.
+    The widget will try to iterate the field to get access to the subfields and
+    call them to render them.
+
+    If `prefix_label` is set, the subfield's label is printed before the field,
+    otherwise afterwards. The latter is useful for iterating radios or
+    checkboxes.
+    """
+
+    def __init__(self, html_tag="ul", prefix_label=True):
+        assert html_tag in ("ol", "ul")
+        self.html_tag = html_tag
+        self.prefix_label = prefix_label
+
+    def __call__(self, field, **kwargs):
+        kwargs.setdefault("id", field.id)
+        html = [f"<{self.html_tag} {html_params(**kwargs)}>"]
+        for subfield in field:
+            if self.prefix_label:
+                html.append(f"<li>{subfield.label} {subfield()}</li>")
+            else:
+                html.append(f"<li>{subfield()} {subfield.label}</li>")
+        html.append("</%s>" % self.html_tag)
+        return Markup("".join(html))
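+
+
+# An illustrative sketch (not upstream code): RadioField uses
+# ListWidget(prefix_label=False) as its default widget, so the hypothetical
+# form below renders as a <ul> of radio inputs with trailing labels.
+def _sketch_list_widget():  # pragma: no cover - documentation example
+    from wtforms import Form, RadioField
+
+    class PickForm(Form):
+        flavour = RadioField(choices=[("v", "Vanilla"), ("c", "Chocolate")])
+
+    print(PickForm().flavour())  # <ul id="flavour"><li><input ...> Vanilla</li>...</ul>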
+
+
+class TableWidget:
+    """
+    Renders a list of fields as a set of table rows with th/td pairs.
+
+    If `with_table_tag` is True, then an enclosing <table> is placed around the
+    rows.
+
+    Hidden fields will not be displayed with a row; instead, each one is
+    pushed into the next table row to ensure XHTML validity. Hidden fields
+    at the end of the field list will appear outside the table.
+    """
+
+    def __init__(self, with_table_tag=True):
+        self.with_table_tag = with_table_tag
+
+    def __call__(self, field, **kwargs):
+        html = []
+        if self.with_table_tag:
+            kwargs.setdefault("id", field.id)
+            html.append("<table %s>" % html_params(**kwargs))
+        hidden = ""
+        for subfield in field:
+            if subfield.type in ("HiddenField", "CSRFTokenField"):
+                hidden += str(subfield)
+            else:
+                html.append(
+                    "<tr><th>%s</th><td>%s%s</td></tr>"
+                    % (str(subfield.label), hidden, str(subfield))
+                )
+                hidden = ""
+        if self.with_table_tag:
+            html.append("</table>")
+        if hidden:
+            html.append(hidden)
+        return Markup("".join(html))
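+
+
+# An illustrative sketch (not upstream code): FormField renders an enclosed
+# form with TableWidget by default; the nested form here is hypothetical.
+def _sketch_table_widget():  # pragma: no cover - documentation example
+    from wtforms import Form, FormField, StringField
+
+    class Address(Form):
+        street = StringField()
+        city = StringField()
+
+    class Order(Form):
+        shipping = FormField(Address)
+
+    print(Order().shipping())  # <table id="shipping"><tr><th>...</th>...</table>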
+
+
+class Input:
+    """
+    Render a basic ``<input>`` field.
+
+    This is used as the basis for most of the other input fields.
+
+    By default, the `_value()` method will be called upon the associated field
+    to provide the ``value=`` HTML attribute.
+    """
+
+    html_params = staticmethod(html_params)
+    validation_attrs = ["required", "disabled"]
+
+    def __init__(self, input_type=None):
+        if input_type is not None:
+            self.input_type = input_type
+
+    def __call__(self, field, **kwargs):
+        kwargs.setdefault("id", field.id)
+        kwargs.setdefault("type", self.input_type)
+        if "value" not in kwargs:
+            kwargs["value"] = field._value()
+        flags = getattr(field, "flags", {})
+        for k in dir(flags):
+            if k in self.validation_attrs and k not in kwargs:
+                kwargs[k] = getattr(flags, k)
+        return Markup("<input %s>" % self.html_params(name=field.name, **kwargs))
+
+
+class TextInput(Input):
+    """
+    Render a single-line text input.
+    """
+
+    input_type = "text"
+    validation_attrs = [
+        "required",
+        "disabled",
+        "readonly",
+        "maxlength",
+        "minlength",
+        "pattern",
+    ]
+
+
+class PasswordInput(Input):
+    """
+    Render a password input.
+
+    For security purposes, this field will not reproduce the value on a form
+    submit by default. To have the value filled in, set `hide_value` to
+    `False`.
+    """
+
+    input_type = "password"
+    validation_attrs = [
+        "required",
+        "disabled",
+        "readonly",
+        "maxlength",
+        "minlength",
+        "pattern",
+    ]
+
+    def __init__(self, hide_value=True):
+        self.hide_value = hide_value
+
+    def __call__(self, field, **kwargs):
+        if self.hide_value:
+            kwargs["value"] = ""
+        return super().__call__(field, **kwargs)
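+
+
+# A short sketch of the hide_value behaviour described above; the form and
+# value are hypothetical.
+def _sketch_password_input():  # pragma: no cover - documentation example
+    from wtforms import Form, PasswordField
+
+    class LoginForm(Form):
+        password = PasswordField(widget=PasswordInput(hide_value=False))
+
+    form = LoginForm(data={"password": "hunter2"})
+    print(form.password())  # value="hunter2" survives because hide_value=False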
+
+
+class HiddenInput(Input):
+    """
+    Render a hidden input.
+    """
+
+    input_type = "hidden"
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.field_flags = {"hidden": True}
+
+
+class CheckboxInput(Input):
+    """
+    Render a checkbox.
+
+    The ``checked`` HTML attribute is set if the field's data is a truthy value.
+    """
+
+    input_type = "checkbox"
+
+    def __call__(self, field, **kwargs):
+        if getattr(field, "checked", field.data):
+            kwargs["checked"] = True
+        return super().__call__(field, **kwargs)
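+
+
+# A short sketch: BooleanField pairs with this widget by default, and a
+# truthy value produces the compact ``checked`` attribute (the field name
+# below is hypothetical).
+def _sketch_checkbox_input():  # pragma: no cover - documentation example
+    from wtforms import BooleanField, Form
+
+    class TermsForm(Form):
+        accept = BooleanField()
+
+    print(TermsForm(data={"accept": True}).accept())  # <input checked ... type="checkbox">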
+
+
+class RadioInput(Input):
+    """
+    Render a single radio button.
+
+    This widget is most commonly used in conjunction with ListWidget or some
+    other listing, as singular radio buttons are not very useful.
+    """
+
+    input_type = "radio"
+
+    def __call__(self, field, **kwargs):
+        if field.checked:
+            kwargs["checked"] = True
+        return super().__call__(field, **kwargs)
+
+
+class FileInput(Input):
+    """Render a file chooser input.
+
+    :param multiple: allow choosing multiple files
+    """
+
+    input_type = "file"
+    validation_attrs = ["required", "disabled", "accept"]
+
+    def __init__(self, multiple=False):
+        super().__init__()
+        self.multiple = multiple
+
+    def __call__(self, field, **kwargs):
+        # Browsers ignore the value of file inputs for security reasons.
+        kwargs["value"] = False
+
+        if self.multiple:
+            kwargs["multiple"] = True
+
+        return super().__call__(field, **kwargs)
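+
+
+# A short sketch: multiple=True simply adds the boolean ``multiple``
+# attribute (the form below is hypothetical).
+def _sketch_file_input():  # pragma: no cover - documentation example
+    from wtforms import FileField, Form
+
+    class UploadForm(Form):
+        attachments = FileField(widget=FileInput(multiple=True))
+
+    print(UploadForm().attachments())  # <input ... multiple type="file">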
+
+
+class SubmitInput(Input):
+    """
+    Renders a submit button.
+
+    The field's label is used as the text of the submit button instead of the
+    data on the field.
+    """
+
+    input_type = "submit"
+
+    def __call__(self, field, **kwargs):
+        kwargs.setdefault("value", field.label.text)
+        return super().__call__(field, **kwargs)
+
+
+class TextArea:
+    """
+    Renders a multi-line text area.
+
+    `rows` and `cols` ought to be passed as keyword args when rendering.
+    """
+
+    validation_attrs = ["required", "disabled", "readonly", "maxlength", "minlength"]
+
+    def __call__(self, field, **kwargs):
+        kwargs.setdefault("id", field.id)
+        flags = getattr(field, "flags", {})
+        for k in dir(flags):
+            if k in self.validation_attrs and k not in kwargs:
+                kwargs[k] = getattr(flags, k)
+        return Markup(
+            "<textarea %s>\r\n%s</textarea>"
+            % (html_params(name=field.name, **kwargs), escape(field._value()))
+        )
+
+
+class Select:
+    """
+    Renders a select field.
+
+    If `multiple` is True, then the `size` property should be specified on
+    rendering to make the field useful.
+
+    The field must provide an `iter_choices()` method which the widget will
+    call on rendering; this method must yield tuples of
+    `(value, label, selected)` or `(value, label, selected, render_kw)`.
+    It also must provide a `has_groups()` method which tells whether choices
+    are divided into groups, and if they do, the field must have an
+    `iter_groups()` method that yields tuples of `(label, choices)`, where
+    `choices` is an iterable of `(value, label, selected)` tuples.
+    """
+
+    validation_attrs = ["required", "disabled"]
+
+    def __init__(self, multiple=False):
+        self.multiple = multiple
+
+    def __call__(self, field, **kwargs):
+        kwargs.setdefault("id", field.id)
+        if self.multiple:
+            kwargs["multiple"] = True
+        flags = getattr(field, "flags", {})
+        for k in dir(flags):
+            if k in self.validation_attrs and k not in kwargs:
+                kwargs[k] = getattr(flags, k)
+        html = ["<select %s>" % html_params(name=field.name, **kwargs)]
+        if field.has_groups():
+            for group, choices in field.iter_groups():
+                html.append("<optgroup %s>" % html_params(label=group))
+                for choice in choices:
+                    if len(choice) == 4:
+                        val, label, selected, render_kw = choice
+                    else:
+                        warnings.warn(
+                            "'iter_groups' is expected to return 4 items tuple since "
+                            "wtforms 3.1, this will be mandatory in wtforms 3.2",
+                            DeprecationWarning,
+                            stacklevel=2,
+                        )
+                        val, label, selected = choice
+                        render_kw = {}
+                    html.append(self.render_option(val, label, selected, **render_kw))
+                html.append("</optgroup>")
+        else:
+            for choice in field.iter_choices():
+                if len(choice) == 4:
+                    val, label, selected, render_kw = choice
+                else:
+                    warnings.warn(
+                        "'iter_groups' is expected to return 4 items tuple since "
+                        "wtforms 3.1, this will be mandatory in wtforms 3.2",
+                        DeprecationWarning,
+                        stacklevel=2,
+                    )
+                    val, label, selected = choice
+                    render_kw = {}
+                html.append(self.render_option(val, label, selected, **render_kw))
+        html.append("</select>")
+        return Markup("".join(html))
+
+    @classmethod
+    def render_option(cls, value, label, selected, **kwargs):
+        if value is True:
+            # Handle the special case of a 'True' value.
+            value = str(value)
+
+        options = dict(kwargs, value=value)
+        if selected:
+            options["selected"] = True
+        return Markup(f"<option {html_params(**options)}>{escape(label)}</option>")
+
+
+class Option:
+    """
+    Renders the individual option from a select field.
+
+    This is just a convenience for various custom rendering situations, and an
+    option by itself does not constitute an entire field.
+    """
+
+    def __call__(self, field, **kwargs):
+        return Select.render_option(
+            field._value(), field.label.text, field.checked, **kwargs
+        )
+
+
+class SearchInput(Input):
+    """
+    Renders an input with type "search".
+    """
+
+    input_type = "search"
+    validation_attrs = [
+        "required",
+        "disabled",
+        "readonly",
+        "maxlength",
+        "minlength",
+        "pattern",
+    ]
+
+
+class TelInput(Input):
+    """
+    Renders an input with type "tel".
+    """
+
+    input_type = "tel"
+    validation_attrs = [
+        "required",
+        "disabled",
+        "readonly",
+        "maxlength",
+        "minlength",
+        "pattern",
+    ]
+
+
+class URLInput(Input):
+    """
+    Renders an input with type "url".
+    """
+
+    input_type = "url"
+    validation_attrs = [
+        "required",
+        "disabled",
+        "readonly",
+        "maxlength",
+        "minlength",
+        "pattern",
+    ]
+
+
+class EmailInput(Input):
+    """
+    Renders an input with type "email".
+    """
+
+    input_type = "email"
+    validation_attrs = [
+        "required",
+        "disabled",
+        "readonly",
+        "maxlength",
+        "minlength",
+        "pattern",
+    ]
+
+
+class DateTimeInput(Input):
+    """
+    Renders an input with type "datetime".
+    """
+
+    input_type = "datetime"
+    validation_attrs = ["required", "disabled", "readonly", "max", "min", "step"]
+
+
+class DateInput(Input):
+    """
+    Renders an input with type "date".
+    """
+
+    input_type = "date"
+    validation_attrs = ["required", "disabled", "readonly", "max", "min", "step"]
+
+
+class MonthInput(Input):
+    """
+    Renders an input with type "month".
+    """
+
+    input_type = "month"
+    validation_attrs = ["required", "disabled", "readonly", "max", "min", "step"]
+
+
+class WeekInput(Input):
+    """
+    Renders an input with type "week".
+    """
+
+    input_type = "week"
+    validation_attrs = ["required", "disabled", "readonly", "max", "min", "step"]
+
+
+class TimeInput(Input):
+    """
+    Renders an input with type "time".
+    """
+
+    input_type = "time"
+    validation_attrs = ["required", "disabled", "readonly", "max", "min", "step"]
+
+
+class DateTimeLocalInput(Input):
+    """
+    Renders an input with type "datetime-local".
+    """
+
+    input_type = "datetime-local"
+    validation_attrs = ["required", "disabled", "readonly", "max", "min", "step"]
+
+
+class NumberInput(Input):
+    """
+    Renders an input with type "number".
+    """
+
+    input_type = "number"
+    validation_attrs = ["required", "disabled", "readonly", "max", "min", "step"]
+
+    def __init__(self, step=None, min=None, max=None):
+        self.step = step
+        self.min = min
+        self.max = max
+
+    def __call__(self, field, **kwargs):
+        if self.step is not None:
+            kwargs.setdefault("step", self.step)
+        if self.min is not None:
+            kwargs.setdefault("min", self.min)
+        if self.max is not None:
+            kwargs.setdefault("max", self.max)
+        return super().__call__(field, **kwargs)
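+
+
+# A short sketch: the constructor arguments become default HTML attributes
+# on render (the field below is hypothetical; a NumberRange validator can
+# set matching field flags instead).
+def _sketch_number_input():  # pragma: no cover - documentation example
+    from wtforms import Form, IntegerField
+
+    class QuantityForm(Form):
+        qty = IntegerField(widget=NumberInput(step=5, min=0, max=100))
+
+    print(QuantityForm().qty())  # <input ... max="100" min="0" step="5" type="number">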
+
+
+class RangeInput(Input):
+    """
+    Renders an input with type "range".
+    """
+
+    input_type = "range"
+    validation_attrs = ["required", "disabled", "max", "min", "step"]
+
+    def __init__(self, step=None):
+        self.step = step
+
+    def __call__(self, field, **kwargs):
+        if self.step is not None:
+            kwargs.setdefault("step", self.step)
+        return super().__call__(field, **kwargs)
+
+
+class ColorInput(Input):
+    """
+    Renders an input with type "color".
+    """
+
+    input_type = "color"
diff --git a/venv/Scripts/alembic.exe b/venv/Scripts/alembic.exe
new file mode 100644
index 0000000000000000000000000000000000000000..2b3f3d1069576fac087a5cafb5580ab53059c438
Binary files /dev/null and b/venv/Scripts/alembic.exe differ
diff --git a/venv/Scripts/mako-render.exe b/venv/Scripts/mako-render.exe
new file mode 100644
index 0000000000000000000000000000000000000000..c4af2612c00177e4e2df740185e23ba11bcbe31c
Binary files /dev/null and b/venv/Scripts/mako-render.exe differ
diff --git a/wsgi.py b/wsgi.py
index 61be2180d6a9f008a4a8f7c08a8503546a59166d..78681907a56f6e8852ba79e6565c4778be08abb6 100644
--- a/wsgi.py
+++ b/wsgi.py
@@ -1,4 +1,9 @@
+import os
+import sys
+import logging
 from app import app as application
 
+logging.basicConfig(stream=sys.stderr)
+
 if __name__ == '__main__':
-    app = application
+    application.run(host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))