- # dialects/sqlite/base.py
- # Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
- # <see AUTHORS file>
- #
- # This module is part of SQLAlchemy and is released under
- # the MIT License: https://www.opensource.org/licenses/mit-license.php
- # mypy: ignore-errors
- r'''
- .. dialect:: sqlite
- :name: SQLite
- :normal_support: 3.12+
- :best_effort: 3.7.16+
- .. _sqlite_datetime:
- Date and Time Types
- -------------------
- SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
- not provide out of the box functionality for translating values between Python
- `datetime` objects and a SQLite-supported format. SQLAlchemy's own
- :class:`~sqlalchemy.types.DateTime` and related types provide date formatting
- and parsing functionality when SQLite is used. The implementation classes are
- :class:`_sqlite.DATETIME`, :class:`_sqlite.DATE` and :class:`_sqlite.TIME`.
- These types represent dates and times as ISO formatted strings, which also
- nicely support ordering. There's no reliance on typical "libc" internals for
- these functions so historical dates are fully supported.
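- For example, a :class:`~sqlalchemy.types.DateTime` column stores ISO
- formatted strings and returns ``datetime`` objects transparently; a minimal
- sketch (the table and column names here are illustrative)::
-     import datetime
-     from sqlalchemy import (
-         Column,
-         DateTime,
-         Integer,
-         MetaData,
-         Table,
-         create_engine,
-     )
-     metadata = MetaData()
-     events = Table(
-         "events",
-         metadata,
-         Column("id", Integer, primary_key=True),
-         Column("created_at", DateTime),
-     )
-     engine = create_engine("sqlite://")
-     metadata.create_all(engine)
-     with engine.begin() as conn:
-         conn.execute(
-             events.insert().values(
-                 created_at=datetime.datetime(2021, 3, 15, 12, 5, 57)
-             )
-         )
-         # stored as the text '2021-03-15 12:05:57.000000'
-         print(conn.execute(events.select()).all())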
- Ensuring Text affinity
- ^^^^^^^^^^^^^^^^^^^^^^
- The DDL rendered for these types is the standard ``DATE``, ``TIME``
- and ``DATETIME`` indicators. However, custom storage formats can also be
- applied to these types. When the
- storage format is detected as containing no alpha characters, the DDL for
- these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
- so that the column continues to have textual affinity.
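- For example, a hypothetical digits-only storage format would otherwise
- acquire numeric affinity, so the rendered DDL name is adjusted::
-     from sqlalchemy.dialects.sqlite import DATETIME
-     # illustrative all-numeric format; DDL renders as "DATETIME_CHAR"
-     dt = DATETIME(
-         storage_format="%(year)04d%(month)02d%(day)02d%(hour)02d%(minute)02d%(second)02d",
-         regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})",
-     )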
- .. seealso::
- `Type Affinity <https://www.sqlite.org/datatype3.html#affinity>`_ -
- in the SQLite documentation
- .. _sqlite_autoincrement:
- SQLite Auto Incrementing Behavior
- ----------------------------------
- Background on SQLite's autoincrement is at: https://sqlite.org/autoinc.html
- Key concepts:
- * SQLite has an implicit "auto increment" feature that takes place for any
- non-composite primary-key column that is specifically created using
- "INTEGER PRIMARY KEY" for the type + primary key.
- * SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
- equivalent to the implicit autoincrement feature; this keyword is not
- recommended for general use. SQLAlchemy does not render this keyword
- unless a special SQLite-specific directive is used (see below). However,
- it still requires that the column's type is named "INTEGER".
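- The implicit feature can be demonstrated with the driver alone; a brief
- sketch::
-     import sqlite3
-     conn = sqlite3.connect(":memory:")
-     conn.execute("CREATE TABLE t (id INTEGER PRIMARY KEY, data TEXT)")
-     cursor = conn.execute("INSERT INTO t (data) VALUES ('a')")
-     # "id" is populated automatically as an alias of the rowid
-     print(cursor.lastrowid)  # 1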
- Using the AUTOINCREMENT Keyword
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- To specifically render the AUTOINCREMENT keyword on the primary key column
- when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
- construct::
- Table(
- "sometable",
- metadata,
- Column("id", Integer, primary_key=True),
- sqlite_autoincrement=True,
- )
- Allowing autoincrement behavior with SQLAlchemy types other than Integer/INTEGER
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- SQLite's typing model is based on naming conventions. Among other things, this
- means that any type name which contains the substring ``"INT"`` will be
- determined to be of "integer affinity". A type named ``"BIGINT"``,
- ``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be
- of "integer" affinity. However, **the SQLite autoincrement feature, whether
- implicitly or explicitly enabled, requires that the name of the column's type
- is exactly the string "INTEGER"**. Therefore, if an application uses a type
- like :class:`.BigInteger` for a primary key, on SQLite this type will need to
- be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE
- TABLE`` statement in order for the autoincrement behavior to be available.
- One approach to achieve this is to use :class:`.Integer` on SQLite
- only using :meth:`.TypeEngine.with_variant`::
- table = Table(
- "my_table",
- metadata,
- Column(
- "id",
- BigInteger().with_variant(Integer, "sqlite"),
- primary_key=True,
- ),
- )
- Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
- name to be ``INTEGER`` when compiled against SQLite::
- from sqlalchemy import BigInteger
- from sqlalchemy.ext.compiler import compiles
- class SLBigInteger(BigInteger):
- pass
- @compiles(SLBigInteger, "sqlite")
- def bi_c(element, compiler, **kw):
- return "INTEGER"
- @compiles(SLBigInteger)
- def bi_c(element, compiler, **kw):
- return compiler.visit_BIGINT(element, **kw)
- table = Table(
- "my_table", metadata, Column("id", SLBigInteger(), primary_key=True)
- )
- .. seealso::
- :meth:`.TypeEngine.with_variant`
- :ref:`sqlalchemy.ext.compiler_toplevel`
- `Datatypes In SQLite Version 3 <https://sqlite.org/datatype3.html>`_
- .. _sqlite_transactions:
- Transactions with SQLite and the sqlite3 driver
- -----------------------------------------------
- As a file-based database, SQLite's approach to transactions differs from
- traditional databases in many ways. Additionally, the ``sqlite3`` driver
- included with Python (as well as the async version ``aiosqlite``, which builds
- on top of it) has several quirks, workarounds, and API features in the
- area of transaction control, all of which generally need to be addressed when
- constructing a SQLAlchemy application that uses SQLite.
- Legacy Transaction Mode with the sqlite3 driver
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- The most important aspect of transaction handling with the sqlite3 driver is
- that it defaults (which will continue through Python 3.15 before being
- removed in Python 3.16) to legacy transactional behavior which does
- not strictly follow :pep:`249`. The way in which the driver diverges from the
- PEP is that it does not "begin" a transaction automatically as dictated by
- :pep:`249` except in the case of DML statements, e.g. INSERT, UPDATE, and
- DELETE. Normally, :pep:`249` dictates that a BEGIN must be emitted upon
- the first SQL statement of any kind, so that all subsequent operations will
- be established within a transaction until ``connection.commit()`` has been
- called. The ``sqlite3`` driver, in an effort to be easier to use in
- highly concurrent environments, skips this step for DQL (e.g. SELECT) statements,
- and also skips it for DDL (e.g. CREATE TABLE etc.) statements for more legacy
- reasons. Statements such as SAVEPOINT are also skipped.
- In modern versions of the ``sqlite3`` driver as of Python 3.12, this legacy
- mode of operation is referred to as
- `"legacy transaction control" <https://docs.python.org/3/library/sqlite3.html#sqlite3-transaction-control-isolation-level>`_, and is in
- effect by default due to the ``Connection.autocommit`` parameter being set to
- the constant ``sqlite3.LEGACY_TRANSACTION_CONTROL``. Prior to Python 3.12,
- the ``Connection.autocommit`` attribute did not exist.
- The implications of legacy transaction mode include the following (a short
- demonstration follows this list):
- * **Incorrect support for transactional DDL** - statements like CREATE TABLE, ALTER TABLE,
- CREATE INDEX etc. will not automatically BEGIN a transaction if one were not
- started already, leading to the changes made by each statement being
- "autocommitted" immediately unless BEGIN were otherwise emitted first. Very
- old (pre Python 3.6) versions of SQLite would also force a COMMIT for these
- operations even if a transaction were present, however this is no longer the
- case.
- * **SERIALIZABLE behavior not fully functional** - SQLite's transaction isolation
- behavior is normally consistent with SERIALIZABLE isolation, as it is a file-
- based system that locks the database file entirely for write operations,
- preventing COMMIT until all reader transactions (and associated file locks)
- have completed. However, sqlite3's legacy transaction mode fails to emit BEGIN for SELECT
- statements, which causes these SELECT statements to no longer be "repeatable",
- failing one of the consistency guarantees of SERIALIZABLE.
- * **Incorrect behavior for SAVEPOINT** - as the SAVEPOINT statement does not
- imply a BEGIN, a new SAVEPOINT emitted before a BEGIN will function on its
- own but fails to participate in the enclosing transaction, meaning a ROLLBACK
- of the transaction will not roll back elements that were part of a released
- savepoint.
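- These behaviors can be observed directly with the ``sqlite3`` driver in its
- default legacy mode; a short demonstration::
-     import sqlite3
-     conn = sqlite3.connect(":memory:")
-     conn.execute("SELECT 1").fetchall()
-     print(conn.in_transaction)  # False - no BEGIN emitted for SELECT
-     conn.execute("CREATE TABLE t (x INTEGER)")
-     print(conn.in_transaction)  # False - the DDL was autocommitted
-     conn.execute("INSERT INTO t (x) VALUES (1)")
-     print(conn.in_transaction)  # True - DML implicitly emits BEGIN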
- Legacy transaction mode first existed in order to facilitate working around
- SQLite's file locks. Because SQLite relies upon whole-file locks, it is easy to
- get "database is locked" errors, particularly when newer features like "write
- ahead logging" are disabled. This is a key reason why ``sqlite3``'s legacy
- transaction mode is still the default mode of operation; disabling it will
- produce behavior that is more susceptible to locked database errors. However
- note that **legacy transaction mode will no longer be the default** in a future
- Python version (3.16 as of this writing).
- .. _sqlite_enabling_transactions:
- Enabling Non-Legacy SQLite Transactional Modes with the sqlite3 or aiosqlite driver
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Current SQLAlchemy support allows either setting the
- ``Connection.autocommit`` attribute, most directly by using a
- :func:`._sa.create_engine` parameter, or if on an older version of Python where
- the attribute is not available, using event hooks to control the behavior of
- BEGIN.
- * **Enabling modern sqlite3 transaction control via the autocommit connect parameter** (Python 3.12 and above)
- To use SQLite in the mode described at `Transaction control via the autocommit attribute <https://docs.python.org/3/library/sqlite3.html#transaction-control-via-the-autocommit-attribute>`_,
- the most straightforward approach is to set the attribute to its recommended value
- of ``False`` at the connect level using :paramref:`_sa.create_engine.connect_args`::
- from sqlalchemy import create_engine
- engine = create_engine(
- "sqlite:///myfile.db", connect_args={"autocommit": False}
- )
- This parameter is also passed through when using the aiosqlite driver::
- from sqlalchemy.ext.asyncio import create_async_engine
- engine = create_async_engine(
- "sqlite+aiosqlite:///myfile.db", connect_args={"autocommit": False}
- )
- The parameter can also be set at the attribute level using the :meth:`.PoolEvents.connect`
- event hook, however this will only work for sqlite3, as aiosqlite does not yet expose this
- attribute on its ``Connection`` object::
- from sqlalchemy import create_engine, event
- engine = create_engine("sqlite:///myfile.db")
- @event.listens_for(engine, "connect")
- def do_connect(dbapi_connection, connection_record):
- # enable autocommit=False mode
- dbapi_connection.autocommit = False
- * **Using SQLAlchemy to emit BEGIN in lieu of SQLite's transaction control** (all Python versions, sqlite3 and aiosqlite)
- For older versions of ``sqlite3`` or for cross-compatibility with older and
- newer versions, SQLAlchemy can also take over the job of transaction control.
- This is achieved by using the :meth:`.ConnectionEvents.begin` hook
- to emit the "BEGIN" command directly, while also disabling SQLite's control
- of this command using the :meth:`.PoolEvents.connect` event hook to set the
- ``Connection.isolation_level`` attribute to ``None``::
- from sqlalchemy import create_engine, event
- engine = create_engine("sqlite:///myfile.db")
- @event.listens_for(engine, "connect")
- def do_connect(dbapi_connection, connection_record):
- # disable sqlite3's emitting of the BEGIN statement entirely.
- dbapi_connection.isolation_level = None
- @event.listens_for(engine, "begin")
- def do_begin(conn):
- # emit our own BEGIN. sqlite3 still emits COMMIT/ROLLBACK correctly
- conn.exec_driver_sql("BEGIN")
- When using the asyncio variant ``aiosqlite``, refer to ``engine.sync_engine``
- as in the example below::
- from sqlalchemy import create_engine, event
- from sqlalchemy.ext.asyncio import create_async_engine
- engine = create_async_engine("sqlite+aiosqlite:///myfile.db")
- @event.listens_for(engine.sync_engine, "connect")
- def do_connect(dbapi_connection, connection_record):
- # disable aiosqlite's emitting of the BEGIN statement entirely.
- dbapi_connection.isolation_level = None
- @event.listens_for(engine.sync_engine, "begin")
- def do_begin(conn):
- # emit our own BEGIN. aiosqlite still emits COMMIT/ROLLBACK correctly
- conn.exec_driver_sql("BEGIN")
- .. _sqlite_isolation_level:
- Using SQLAlchemy's Driver Level AUTOCOMMIT Feature with SQLite
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- SQLAlchemy has a comprehensive database isolation feature with optional
- autocommit support that is introduced in the section :ref:`dbapi_autocommit`.
- For the ``sqlite3`` and ``aiosqlite`` drivers, SQLAlchemy only includes
- built-in support for "AUTOCOMMIT". Note that this mode is currently incompatible
- with the non-legacy isolation mode hooks documented in the previous
- section at :ref:`sqlite_enabling_transactions`.
- To use the ``sqlite3`` driver with SQLAlchemy driver-level autocommit,
- create an engine setting the :paramref:`_sa.create_engine.isolation_level`
- parameter to "AUTOCOMMIT"::
- eng = create_engine("sqlite:///myfile.db", isolation_level="AUTOCOMMIT")
- When using the above mode, any event hooks that set the sqlite3 ``Connection.autocommit``
- parameter away from its default of ``sqlite3.LEGACY_TRANSACTION_CONTROL``
- as well as hooks that emit ``BEGIN`` should be disabled.
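- With AUTOCOMMIT in place, statements take effect immediately and no COMMIT is
- emitted; a brief sketch (``some_table`` is assumed to already exist in
- ``myfile.db``)::
-     from sqlalchemy import create_engine, text
-     eng = create_engine("sqlite:///myfile.db", isolation_level="AUTOCOMMIT")
-     with eng.connect() as conn:
-         # takes effect immediately; no explicit commit() is needed
-         conn.execute(text("INSERT INTO some_table (data) VALUES (1)"))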
- Additional Reading for SQLite / sqlite3 transaction control
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Links with important information on SQLite, the sqlite3 driver,
- as well as long historical conversations on how things got to their current state:
- * `Isolation in SQLite <https://www.sqlite.org/isolation.html>`_ - on the SQLite website
- * `Transaction control <https://docs.python.org/3/library/sqlite3.html#transaction-control>`_ - describes the sqlite3 autocommit attribute as well
- as the legacy isolation_level attribute.
- * `sqlite3 SELECT does not BEGIN a transaction, but should according to spec <https://github.com/python/cpython/issues/54133>`_ - imported Python standard library issue on github
- * `sqlite3 module breaks transactions and potentially corrupts data <https://github.com/python/cpython/issues/54949>`_ - imported Python standard library issue on github
- INSERT/UPDATE/DELETE...RETURNING
- ---------------------------------
- The SQLite dialect supports SQLite 3.35's ``INSERT|UPDATE|DELETE..RETURNING``
- syntax. ``INSERT..RETURNING`` may be used
- automatically in some cases in order to fetch newly generated identifiers in
- place of the traditional approach of using ``cursor.lastrowid``, however
- ``cursor.lastrowid`` is currently still preferred for simple single-statement
- cases for its better performance.
- To specify an explicit ``RETURNING`` clause, use the
- :meth:`._UpdateBase.returning` method on a per-statement basis::
- # INSERT..RETURNING
- result = connection.execute(
- table.insert().values(name="foo").returning(table.c.col1, table.c.col2)
- )
- print(result.all())
- # UPDATE..RETURNING
- result = connection.execute(
- table.update()
- .where(table.c.name == "foo")
- .values(name="bar")
- .returning(table.c.col1, table.c.col2)
- )
- print(result.all())
- # DELETE..RETURNING
- result = connection.execute(
- table.delete()
- .where(table.c.name == "foo")
- .returning(table.c.col1, table.c.col2)
- )
- print(result.all())
- .. versionadded:: 2.0 Added support for SQLite RETURNING
- .. _sqlite_foreign_keys:
- Foreign Key Support
- -------------------
- SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
- however by default these constraints have no effect on the operation of the
- table.
- Constraint checking on SQLite has three prerequisites:
- * At least version 3.6.19 of SQLite must be in use
- * The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
- or SQLITE_OMIT_TRIGGER symbols enabled.
- * The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
- connections before use -- including the initial call to
- :meth:`sqlalchemy.schema.MetaData.create_all`.
- SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
- new connections through the usage of events::
- from sqlalchemy.engine import Engine
- from sqlalchemy import event
- @event.listens_for(Engine, "connect")
- def set_sqlite_pragma(dbapi_connection, connection_record):
- # the sqlite3 driver will not set PRAGMA foreign_keys
- # if autocommit=False; set to True temporarily
- ac = dbapi_connection.autocommit
- dbapi_connection.autocommit = True
- cursor = dbapi_connection.cursor()
- cursor.execute("PRAGMA foreign_keys=ON")
- cursor.close()
- # restore previous autocommit setting
- dbapi_connection.autocommit = ac
- .. warning::
- When SQLite foreign keys are enabled, it is **not possible**
- to emit CREATE or DROP statements for tables that contain
- mutually-dependent foreign key constraints;
- to emit the DDL for these tables requires that ALTER TABLE be used to
- create or drop these constraints separately, for which SQLite has
- no support.
- .. seealso::
- `SQLite Foreign Key Support <https://www.sqlite.org/foreignkeys.html>`_
- - on the SQLite web site.
- :ref:`event_toplevel` - SQLAlchemy event API.
- :ref:`use_alter` - more information on SQLAlchemy's facilities for handling
- mutually-dependent foreign key constraints.
- .. _sqlite_on_conflict_ddl:
- ON CONFLICT support for constraints
- -----------------------------------
- .. seealso:: This section describes the :term:`DDL` version of "ON CONFLICT" for
- SQLite, which occurs within a CREATE TABLE statement. For "ON CONFLICT" as
- applied to an INSERT statement, see :ref:`sqlite_on_conflict_insert`.
- SQLite supports a non-standard DDL clause known as ON CONFLICT which can be applied
- to primary key, unique, check, and not null constraints. In DDL, it is
- rendered either within the "CONSTRAINT" clause or within the column definition
- itself depending on the location of the target constraint. To render this
- clause within DDL, the extension parameter ``sqlite_on_conflict`` can be
- specified with a string conflict resolution algorithm within the
- :class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`,
- :class:`.CheckConstraint` objects. Within the :class:`_schema.Column` object,
- there
- are individual parameters ``sqlite_on_conflict_not_null``,
- ``sqlite_on_conflict_primary_key``, ``sqlite_on_conflict_unique`` which each
- correspond to the three types of relevant constraint types that can be
- indicated from a :class:`_schema.Column` object.
- .. seealso::
- `ON CONFLICT <https://www.sqlite.org/lang_conflict.html>`_ - in the SQLite
- documentation
- .. versionadded:: 1.3
- The ``sqlite_on_conflict`` parameters accept a string argument which is just
- the resolution name to be chosen, which on SQLite can be one of ROLLBACK,
- ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint
- that specifies the IGNORE algorithm::
- some_table = Table(
- "some_table",
- metadata,
- Column("id", Integer, primary_key=True),
- Column("data", Integer),
- UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"),
- )
- The above renders CREATE TABLE DDL as:
- .. sourcecode:: sql
- CREATE TABLE some_table (
- id INTEGER NOT NULL,
- data INTEGER,
- PRIMARY KEY (id),
- UNIQUE (id, data) ON CONFLICT IGNORE
- )
- When using the :paramref:`_schema.Column.unique`
- flag to add a UNIQUE constraint
- to a single column, the ``sqlite_on_conflict_unique`` parameter can
- be added to the :class:`_schema.Column` as well, which will be added to the
- UNIQUE constraint in the DDL::
- some_table = Table(
- "some_table",
- metadata,
- Column("id", Integer, primary_key=True),
- Column(
- "data", Integer, unique=True, sqlite_on_conflict_unique="IGNORE"
- ),
- )
- rendering:
- .. sourcecode:: sql
- CREATE TABLE some_table (
- id INTEGER NOT NULL,
- data INTEGER,
- PRIMARY KEY (id),
- UNIQUE (data) ON CONFLICT IGNORE
- )
- To apply the FAIL algorithm for a NOT NULL constraint,
- ``sqlite_on_conflict_not_null`` is used::
- some_table = Table(
- "some_table",
- metadata,
- Column("id", Integer, primary_key=True),
- Column(
- "data", Integer, nullable=False, sqlite_on_conflict_not_null="FAIL"
- ),
- )
- this renders the column inline ON CONFLICT phrase:
- .. sourcecode:: sql
- CREATE TABLE some_table (
- id INTEGER NOT NULL,
- data INTEGER NOT NULL ON CONFLICT FAIL,
- PRIMARY KEY (id)
- )
- Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
- some_table = Table(
- "some_table",
- metadata,
- Column(
- "id",
- Integer,
- primary_key=True,
- sqlite_on_conflict_primary_key="FAIL",
- ),
- )
- SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
- resolution algorithm is applied to the constraint itself:
- .. sourcecode:: sql
- CREATE TABLE some_table (
- id INTEGER NOT NULL,
- PRIMARY KEY (id) ON CONFLICT FAIL
- )
- .. _sqlite_on_conflict_insert:
- INSERT...ON CONFLICT (Upsert)
- -----------------------------
- .. seealso:: This section describes the :term:`DML` version of "ON CONFLICT" for
- SQLite, which occurs within an INSERT statement. For "ON CONFLICT" as
- applied to a CREATE TABLE statement, see :ref:`sqlite_on_conflict_ddl`.
- From version 3.24.0 onwards, SQLite supports "upserts" (update or insert)
- of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT``
- statement. A candidate row will only be inserted if that row does not violate
- any unique or primary key constraints. In the case of a unique constraint violation, a
- secondary action can occur which can be either "DO UPDATE", indicating that
- the data in the target row should be updated, or "DO NOTHING", which indicates
- to silently skip this row.
- Conflicts are determined using columns that are part of existing unique
- constraints and indexes. These constraints are identified by stating the
- columns and conditions that comprise the indexes.
- SQLAlchemy provides ``ON CONFLICT`` support via the SQLite-specific
- :func:`_sqlite.insert()` function, which provides
- the generative methods :meth:`_sqlite.Insert.on_conflict_do_update`
- and :meth:`_sqlite.Insert.on_conflict_do_nothing`:
- .. sourcecode:: pycon+sql
- >>> from sqlalchemy.dialects.sqlite import insert
- >>> insert_stmt = insert(my_table).values(
- ... id="some_existing_id", data="inserted value"
- ... )
- >>> do_update_stmt = insert_stmt.on_conflict_do_update(
- ... index_elements=["id"], set_=dict(data="updated value")
- ... )
- >>> print(do_update_stmt)
- {printsql}INSERT INTO my_table (id, data) VALUES (?, ?)
- ON CONFLICT (id) DO UPDATE SET data = ?{stop}
- >>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["id"])
- >>> print(do_nothing_stmt)
- {printsql}INSERT INTO my_table (id, data) VALUES (?, ?)
- ON CONFLICT (id) DO NOTHING
- .. versionadded:: 1.4
- .. seealso::
- `Upsert
- <https://sqlite.org/lang_UPSERT.html>`_
- - in the SQLite documentation.
- Specifying the Target
- ^^^^^^^^^^^^^^^^^^^^^
- Both methods supply the "target" of the conflict using column inference:
- * The :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements` argument
- specifies a sequence containing string column names, :class:`_schema.Column`
- objects, and/or SQL expression elements, which would identify a unique index
- or unique constraint.
- * When using :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements`
- to infer an index, a partial index can be inferred by also specifying the
- :paramref:`_sqlite.Insert.on_conflict_do_update.index_where` parameter:
- .. sourcecode:: pycon+sql
- >>> stmt = insert(my_table).values(user_email="a@b.com", data="inserted data")
- >>> do_update_stmt = stmt.on_conflict_do_update(
- ... index_elements=[my_table.c.user_email],
- ... index_where=my_table.c.user_email.like("%@gmail.com"),
- ... set_=dict(data=stmt.excluded.data),
- ... )
- >>> print(do_update_stmt)
- {printsql}INSERT INTO my_table (data, user_email) VALUES (?, ?)
- ON CONFLICT (user_email)
- WHERE user_email LIKE '%@gmail.com'
- DO UPDATE SET data = excluded.data
- The SET Clause
- ^^^^^^^^^^^^^^^
- ``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
- existing row, using any combination of new values as well as values
- from the proposed insertion. These values are specified using the
- :paramref:`_sqlite.Insert.on_conflict_do_update.set_` parameter. This
- parameter accepts a dictionary which consists of direct values
- for UPDATE:
- .. sourcecode:: pycon+sql
- >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
- >>> do_update_stmt = stmt.on_conflict_do_update(
- ... index_elements=["id"], set_=dict(data="updated value")
- ... )
- >>> print(do_update_stmt)
- {printsql}INSERT INTO my_table (id, data) VALUES (?, ?)
- ON CONFLICT (id) DO UPDATE SET data = ?
- .. warning::
- The :meth:`_sqlite.Insert.on_conflict_do_update` method does **not** take
- into account Python-side default UPDATE values or generation functions,
- e.g. those specified using :paramref:`_schema.Column.onupdate`. These
- values will not be exercised for an ON CONFLICT style of UPDATE, unless
- they are manually specified in the
- :paramref:`_sqlite.Insert.on_conflict_do_update.set_` dictionary.
- Updating using the Excluded INSERT Values
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- In order to refer to the proposed insertion row, the special alias
- :attr:`~.sqlite.Insert.excluded` is available as an attribute on
- the :class:`_sqlite.Insert` object; this object creates an "excluded." prefix
- on a column, which informs the DO UPDATE to update the row with the value that
- would have been inserted had the constraint not failed:
- .. sourcecode:: pycon+sql
- >>> stmt = insert(my_table).values(
- ... id="some_id", data="inserted value", author="jlh"
- ... )
- >>> do_update_stmt = stmt.on_conflict_do_update(
- ... index_elements=["id"],
- ... set_=dict(data="updated value", author=stmt.excluded.author),
- ... )
- >>> print(do_update_stmt)
- {printsql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
- ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
- Additional WHERE Criteria
- ^^^^^^^^^^^^^^^^^^^^^^^^^
- The :meth:`_sqlite.Insert.on_conflict_do_update` method also accepts
- a WHERE clause using the :paramref:`_sqlite.Insert.on_conflict_do_update.where`
- parameter, which will limit those rows which receive an UPDATE:
- .. sourcecode:: pycon+sql
- >>> stmt = insert(my_table).values(
- ... id="some_id", data="inserted value", author="jlh"
- ... )
- >>> on_update_stmt = stmt.on_conflict_do_update(
- ... index_elements=["id"],
- ... set_=dict(data="updated value", author=stmt.excluded.author),
- ... where=(my_table.c.status == 2),
- ... )
- >>> print(on_update_stmt)
- {printsql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
- ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
- WHERE my_table.status = ?
- Skipping Rows with DO NOTHING
- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- ``ON CONFLICT`` may be used to skip inserting a row entirely
- if any conflict with a unique constraint occurs; below this is illustrated
- using the :meth:`_sqlite.Insert.on_conflict_do_nothing` method:
- .. sourcecode:: pycon+sql
- >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
- >>> stmt = stmt.on_conflict_do_nothing(index_elements=["id"])
- >>> print(stmt)
- {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT (id) DO NOTHING
- If ``DO NOTHING`` is used without specifying any columns or constraint,
- it has the effect of skipping the INSERT for any unique violation which
- occurs:
- .. sourcecode:: pycon+sql
- >>> stmt = insert(my_table).values(id="some_id", data="inserted value")
- >>> stmt = stmt.on_conflict_do_nothing()
- >>> print(stmt)
- {printsql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT DO NOTHING
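- As with other Core constructs, the resulting statement is invoked using a
- connection; a minimal sketch, assuming ``engine`` and the ``stmt`` from the
- previous example are defined::
-     with engine.begin() as conn:
-         conn.execute(stmt)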
- .. _sqlite_type_reflection:
- Type Reflection
- ---------------
- SQLite types are unlike those of most other database backends, in that
- the string name of the type usually does not correspond to a "type" in a
- one-to-one fashion. Instead, SQLite links per-column typing behavior
- to one of five so-called "type affinities" based on a string matching
- pattern for the type.
- SQLAlchemy's reflection process, when inspecting types, uses a simple
- lookup table to link the keywords returned to provided SQLAlchemy types.
- This lookup table is present within the SQLite dialect as it is for all
- other dialects. However, the SQLite dialect has a different "fallback"
- routine for when a particular type name is not located in the lookup map;
- it instead implements the SQLite "type affinity" scheme located at
- https://www.sqlite.org/datatype3.html section 2.1.
- The provided typemap will make direct associations from an exact string
- name match for the following types:
- :class:`_types.BIGINT`, :class:`_types.BLOB`,
- :class:`_types.BOOLEAN`, :class:`_types.CHAR`,
- :class:`_types.DATE`, :class:`_types.DATETIME`,
- :class:`_types.DECIMAL`, :class:`_types.DOUBLE`,
- :class:`_types.FLOAT`, :class:`_types.INTEGER`,
- :class:`_types.NUMERIC`, :class:`_types.REAL`,
- :class:`_types.SMALLINT`, :class:`_types.TEXT`,
- :class:`_types.TIME`, :class:`_types.TIMESTAMP`,
- :class:`_types.VARCHAR`, :class:`_types.NVARCHAR`,
- :class:`_types.NCHAR`
- When a type name does not match one of the above types, the "type affinity"
- lookup is used instead:
- * :class:`_types.INTEGER` is returned if the type name includes the
- string ``INT``
- * :class:`_types.TEXT` is returned if the type name includes the
- string ``CHAR``, ``CLOB`` or ``TEXT``
- * :class:`_types.NullType` is returned if the type name includes the
- string ``BLOB``
- * :class:`_types.REAL` is returned if the type name includes the string
- ``REAL``, ``FLOA`` or ``DOUB``.
- * Otherwise, the :class:`_types.NUMERIC` type is used (see the sketch
- following this list).
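- The fallback behavior can be observed via reflection; a short sketch using
- deliberately contrived type names::
-     from sqlalchemy import create_engine, inspect, text
-     engine = create_engine("sqlite://")
-     with engine.begin() as conn:
-         conn.execute(text("CREATE TABLE t (a XYZINTQPR, b MYCLOB, c UNKNOWN)"))
-     for col in inspect(engine).get_columns("t"):
-         print(col["name"], col["type"])
-     # a INTEGER   (name contains "INT")
-     # b TEXT      (name contains "CLOB")
-     # c NUMERIC   (no affinity match; fallback)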
- .. _sqlite_partial_index:
- Partial Indexes
- ---------------
- A partial index, e.g. one which uses a WHERE clause, can be specified
- with the DDL system using the argument ``sqlite_where``::
- tbl = Table("testtbl", m, Column("data", Integer))
- idx = Index(
- "test_idx1",
- tbl.c.data,
- sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10),
- )
- The index will be rendered at create time as:
- .. sourcecode:: sql
- CREATE INDEX test_idx1 ON testtbl (data)
- WHERE data > 5 AND data < 10
- .. _sqlite_dotted_column_names:
- Dotted Column Names
- -------------------
- Using table or column names that explicitly have periods in them is
- **not recommended**. While this is generally a bad idea for relational
- databases in general, as the dot is a syntactically significant character,
- SQLite versions prior to **3.10.0** contain a bug which
- requires that SQLAlchemy filter out these dots in result sets.
- The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
- import sqlite3
- assert sqlite3.sqlite_version_info < (
- 3,
- 10,
- 0,
- ), "bug is fixed in this version"
- conn = sqlite3.connect(":memory:")
- cursor = conn.cursor()
- cursor.execute("create table x (a integer, b integer)")
- cursor.execute("insert into x (a, b) values (1, 1)")
- cursor.execute("insert into x (a, b) values (2, 2)")
- cursor.execute("select x.a, x.b from x")
- assert [c[0] for c in cursor.description] == ["a", "b"]
- cursor.execute(
- """
- select x.a, x.b from x where a=1
- union
- select x.a, x.b from x where a=2
- """
- )
- assert [c[0] for c in cursor.description] == ["a", "b"], [
- c[0] for c in cursor.description
- ]
- The second assertion fails:
- .. sourcecode:: text
- Traceback (most recent call last):
- File "test.py", line 19, in <module>
- [c[0] for c in cursor.description]
- AssertionError: ['x.a', 'x.b']
- Where above, the driver incorrectly reports the names of the columns
- including the name of the table, which is entirely inconsistent with the
- behavior when the UNION is not present.
- SQLAlchemy relies upon column names being predictable in how they match
- to the original statement, so the SQLAlchemy dialect has no choice but
- to filter these out::
- from sqlalchemy import create_engine
- eng = create_engine("sqlite://")
- conn = eng.connect()
- conn.exec_driver_sql("create table x (a integer, b integer)")
- conn.exec_driver_sql("insert into x (a, b) values (1, 1)")
- conn.exec_driver_sql("insert into x (a, b) values (2, 2)")
- result = conn.exec_driver_sql("select x.a, x.b from x")
- assert result.keys() == ["a", "b"]
- result = conn.exec_driver_sql(
- """
- select x.a, x.b from x where a=1
- union
- select x.a, x.b from x where a=2
- """
- )
- assert result.keys() == ["a", "b"]
- Note that above, even though SQLAlchemy filters out the dots, *both
- names are still addressable*::
- >>> row = result.first()
- >>> row["a"]
- 1
- >>> row["x.a"]
- 1
- >>> row["b"]
- 1
- >>> row["x.b"]
- 1
- Therefore, the workaround applied by SQLAlchemy only impacts
- :meth:`_engine.CursorResult.keys` and :meth:`.Row.keys()` in the public API. In
- the very specific case where an application is forced to use column names that
- contain dots, and the functionality of :meth:`_engine.CursorResult.keys` and
- :meth:`.Row.keys()` is required to return these dotted names unmodified,
- the ``sqlite_raw_colnames`` execution option may be provided, either on a
- per-:class:`_engine.Connection` basis::
- result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql(
- """
- select x.a, x.b from x where a=1
- union
- select x.a, x.b from x where a=2
- """
- )
- assert result.keys() == ["x.a", "x.b"]
- or on a per-:class:`_engine.Engine` basis::
- engine = create_engine(
- "sqlite://", execution_options={"sqlite_raw_colnames": True}
- )
- When using the per-:class:`_engine.Engine` execution option, note that
- **Core and ORM queries that use UNION may not function properly**.
- SQLite-specific table options
- -----------------------------
- Two options for CREATE TABLE are supported directly by the SQLite
- dialect in conjunction with the :class:`_schema.Table` construct:
- * ``WITHOUT ROWID``::
- Table("some_table", metadata, ..., sqlite_with_rowid=False)
- * ``STRICT``::
- Table("some_table", metadata, ..., sqlite_strict=True)
- .. versionadded:: 2.0.37
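- With both options enabled, the CREATE TABLE DDL would render along these
- lines (a sketch):
- .. sourcecode:: sql
-     CREATE TABLE some_table (
-         id INTEGER NOT NULL,
-         PRIMARY KEY (id)
-     ) WITHOUT ROWID, STRICT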
- .. seealso::
- `SQLite CREATE TABLE options
- <https://www.sqlite.org/lang_createtable.html>`_
- .. _sqlite_include_internal:
- Reflecting internal schema tables
- ----------------------------------
- Reflection methods that return lists of tables will omit so-called
- "SQLite internal schema object" names, which are considered by SQLite
- as any object name that is prefixed with ``sqlite_``. An example of
- such an object is the ``sqlite_sequence`` table that's generated when
- the ``AUTOINCREMENT`` column parameter is used. In order to return
- these objects, the parameter ``sqlite_include_internal=True`` may be
- passed to methods such as :meth:`_schema.MetaData.reflect` or
- :meth:`.Inspector.get_table_names`.
- .. versionadded:: 2.0 Added the ``sqlite_include_internal=True`` parameter.
- Previously, these tables were not ignored by SQLAlchemy reflection
- methods.
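- For example, on a database where ``AUTOINCREMENT`` has been used (so that
- ``sqlite_sequence`` exists), the difference can be seen with the inspector::
-     from sqlalchemy import create_engine, inspect
-     engine = create_engine("sqlite:///myfile.db")
-     insp = inspect(engine)
-     print(insp.get_table_names())  # "sqlite_sequence" omitted
-     print(insp.get_table_names(sqlite_include_internal=True))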
- .. note::
- The ``sqlite_include_internal`` parameter does not refer to the
- "system" tables that are present in schemas such as ``sqlite_master``.
- .. seealso::
- `SQLite Internal Schema Objects <https://www.sqlite.org/fileformat2.html#intschema>`_ - in the SQLite
- documentation.
- ''' # noqa
- from __future__ import annotations
- import datetime
- import numbers
- import re
- from typing import Optional
- from .json import JSON
- from .json import JSONIndexType
- from .json import JSONPathType
- from ... import exc
- from ... import schema as sa_schema
- from ... import sql
- from ... import text
- from ... import types as sqltypes
- from ... import util
- from ...engine import default
- from ...engine import processors
- from ...engine import reflection
- from ...engine.reflection import ReflectionDefaults
- from ...sql import coercions
- from ...sql import compiler
- from ...sql import elements
- from ...sql import roles
- from ...sql import schema
- from ...types import BLOB # noqa
- from ...types import BOOLEAN # noqa
- from ...types import CHAR # noqa
- from ...types import DECIMAL # noqa
- from ...types import FLOAT # noqa
- from ...types import INTEGER # noqa
- from ...types import NUMERIC # noqa
- from ...types import REAL # noqa
- from ...types import SMALLINT # noqa
- from ...types import TEXT # noqa
- from ...types import TIMESTAMP # noqa
- from ...types import VARCHAR # noqa
- class _SQliteJson(JSON):
- def result_processor(self, dialect, coltype):
- default_processor = super().result_processor(dialect, coltype)
- def process(value):
- try:
- return default_processor(value)
- except TypeError:
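- # SQLite's JSON functions may return a numeric scalar directly
- # rather than a JSON string; pass such values through as-is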
- if isinstance(value, numbers.Number):
- return value
- else:
- raise
- return process
- class _DateTimeMixin:
- _reg = None
- _storage_format = None
- def __init__(self, storage_format=None, regexp=None, **kw):
- super().__init__(**kw)
- if regexp is not None:
- self._reg = re.compile(regexp)
- if storage_format is not None:
- self._storage_format = storage_format
- @property
- def format_is_text_affinity(self):
- """return True if the storage format will automatically imply
- a TEXT affinity.
- If the storage format contains no non-numeric characters,
- it will imply a NUMERIC storage format on SQLite; in this case,
- the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
- TIME_CHAR.
- """
- spec = self._storage_format % {
- "year": 0,
- "month": 0,
- "day": 0,
- "hour": 0,
- "minute": 0,
- "second": 0,
- "microsecond": 0,
- }
- return bool(re.search(r"[^0-9]", spec))
- def adapt(self, cls, **kw):
- if issubclass(cls, _DateTimeMixin):
- if self._storage_format:
- kw["storage_format"] = self._storage_format
- if self._reg:
- kw["regexp"] = self._reg
- return super().adapt(cls, **kw)
- def literal_processor(self, dialect):
- bp = self.bind_processor(dialect)
- def process(value):
- return "'%s'" % bp(value)
- return process
- class DATETIME(_DateTimeMixin, sqltypes.DateTime):
- r"""Represent a Python datetime object in SQLite using a string.
- The default string storage format is::
- "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
- e.g.:
- .. sourcecode:: text
- 2021-03-15 12:05:57.105542
- The incoming storage format is by default parsed using the
- Python ``datetime.fromisoformat()`` function.
- .. versionchanged:: 2.0 ``datetime.fromisoformat()`` is used for default
- datetime string parsing.
- The storage format can be customized to some degree using the
- ``storage_format`` and ``regexp`` parameters, such as::
- import re
- from sqlalchemy.dialects.sqlite import DATETIME
- dt = DATETIME(
- storage_format=(
- "%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(minute)02d:%(second)02d"
- ),
- regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)",
- )
- :param truncate_microseconds: when ``True`` microseconds will be truncated
- from the datetime. Can't be specified together with ``storage_format``
- or ``regexp``.
- :param storage_format: format string which will be applied to the dict
- with keys year, month, day, hour, minute, second, and microsecond.
- :param regexp: regular expression which will be applied to incoming result
- rows, replacing the use of ``datetime.fromisoformat()`` to parse incoming
- strings. If the regexp contains named groups, the resulting match dict is
- applied to the Python datetime() constructor as keyword arguments.
- Otherwise, if positional groups are used, the datetime() constructor
- is called with positional arguments via
- ``*map(int, match_obj.groups(0))``.
- """ # noqa
- _storage_format = (
- "%(year)04d-%(month)02d-%(day)02d "
- "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
- )
- def __init__(self, *args, **kwargs):
- truncate_microseconds = kwargs.pop("truncate_microseconds", False)
- super().__init__(*args, **kwargs)
- if truncate_microseconds:
- assert "storage_format" not in kwargs, (
- "You can specify only "
- "one of truncate_microseconds or storage_format."
- )
- assert "regexp" not in kwargs, (
- "You can specify only one of "
- "truncate_microseconds or regexp."
- )
- self._storage_format = (
- "%(year)04d-%(month)02d-%(day)02d "
- "%(hour)02d:%(minute)02d:%(second)02d"
- )
- def bind_processor(self, dialect):
- datetime_datetime = datetime.datetime
- datetime_date = datetime.date
- format_ = self._storage_format
- def process(value):
- if value is None:
- return None
- elif isinstance(value, datetime_datetime):
- return format_ % {
- "year": value.year,
- "month": value.month,
- "day": value.day,
- "hour": value.hour,
- "minute": value.minute,
- "second": value.second,
- "microsecond": value.microsecond,
- }
- elif isinstance(value, datetime_date):
- return format_ % {
- "year": value.year,
- "month": value.month,
- "day": value.day,
- "hour": 0,
- "minute": 0,
- "second": 0,
- "microsecond": 0,
- }
- else:
- raise TypeError(
- "SQLite DateTime type only accepts Python "
- "datetime and date objects as input."
- )
- return process
- def result_processor(self, dialect, coltype):
- if self._reg:
- return processors.str_to_datetime_processor_factory(
- self._reg, datetime.datetime
- )
- else:
- return processors.str_to_datetime
- class DATE(_DateTimeMixin, sqltypes.Date):
- r"""Represent a Python date object in SQLite using a string.
- The default string storage format is::
- "%(year)04d-%(month)02d-%(day)02d"
- e.g.:
- .. sourcecode:: text
- 2011-03-15
- The incoming storage format is by default parsed using the
- Python ``date.fromisoformat()`` function.
- .. versionchanged:: 2.0 ``date.fromisoformat()`` is used for default
- date string parsing.
- The storage format can be customized to some degree using the
- ``storage_format`` and ``regexp`` parameters, such as::
- import re
- from sqlalchemy.dialects.sqlite import DATE
- d = DATE(
- storage_format="%(month)02d/%(day)02d/%(year)04d",
- regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)"),
- )
- :param storage_format: format string which will be applied to the
- dict with keys year, month, and day.
- :param regexp: regular expression which will be applied to
- incoming result rows, replacing the use of ``date.fromisoformat()`` to
- parse incoming strings. If the regexp contains named groups, the resulting
- match dict is applied to the Python date() constructor as keyword
- arguments. Otherwise, if positional groups are used, the date()
- constructor is called with positional arguments via
- ``*map(int, match_obj.groups(0))``.
- """
- _storage_format = "%(year)04d-%(month)02d-%(day)02d"
- def bind_processor(self, dialect):
- datetime_date = datetime.date
- format_ = self._storage_format
- def process(value):
- if value is None:
- return None
- elif isinstance(value, datetime_date):
- return format_ % {
- "year": value.year,
- "month": value.month,
- "day": value.day,
- }
- else:
- raise TypeError(
- "SQLite Date type only accepts Python "
- "date objects as input."
- )
- return process
- def result_processor(self, dialect, coltype):
- if self._reg:
- return processors.str_to_datetime_processor_factory(
- self._reg, datetime.date
- )
- else:
- return processors.str_to_date
- class TIME(_DateTimeMixin, sqltypes.Time):
- r"""Represent a Python time object in SQLite using a string.
- The default string storage format is::
- "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
- e.g.:
- .. sourcecode:: text
- 12:05:57.105542
- The incoming storage format is by default parsed using the
- Python ``time.fromisoformat()`` function.
- .. versionchanged:: 2.0 ``time.fromisoformat()`` is used for default
- time string parsing.
- The storage format can be customized to some degree using the
- ``storage_format`` and ``regexp`` parameters, such as::
- import re
- from sqlalchemy.dialects.sqlite import TIME
- t = TIME(
- storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
- regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?"),
- )
- :param truncate_microseconds: when ``True`` microseconds will be truncated
- from the time. Can't be specified together with ``storage_format``
- or ``regexp``.
- :param storage_format: format string which will be applied to the dict
- with keys hour, minute, second, and microsecond.
- :param regexp: regular expression which will be applied to incoming result
- rows, replacing the use of ``time.fromisoformat()`` to parse incoming
- strings. If the regexp contains named groups, the resulting match dict is
- applied to the Python time() constructor as keyword arguments. Otherwise,
- if positional groups are used, the time() constructor is called with
- positional arguments via ``*map(int, match_obj.groups(0))``.
- """
- _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
- def __init__(self, *args, **kwargs):
- truncate_microseconds = kwargs.pop("truncate_microseconds", False)
- super().__init__(*args, **kwargs)
- if truncate_microseconds:
- assert "storage_format" not in kwargs, (
- "You can specify only "
- "one of truncate_microseconds or storage_format."
- )
- assert "regexp" not in kwargs, (
- "You can specify only one of "
- "truncate_microseconds or regexp."
- )
- self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
- def bind_processor(self, dialect):
- datetime_time = datetime.time
- format_ = self._storage_format
- def process(value):
- if value is None:
- return None
- elif isinstance(value, datetime_time):
- return format_ % {
- "hour": value.hour,
- "minute": value.minute,
- "second": value.second,
- "microsecond": value.microsecond,
- }
- else:
- raise TypeError(
- "SQLite Time type only accepts Python "
- "time objects as input."
- )
- return process
- def result_processor(self, dialect, coltype):
- if self._reg:
- return processors.str_to_datetime_processor_factory(
- self._reg, datetime.time
- )
- else:
- return processors.str_to_time
- colspecs = {
- sqltypes.Date: DATE,
- sqltypes.DateTime: DATETIME,
- sqltypes.JSON: _SQliteJson,
- sqltypes.JSON.JSONIndexType: JSONIndexType,
- sqltypes.JSON.JSONPathType: JSONPathType,
- sqltypes.Time: TIME,
- }
- ischema_names = {
- "BIGINT": sqltypes.BIGINT,
- "BLOB": sqltypes.BLOB,
- "BOOL": sqltypes.BOOLEAN,
- "BOOLEAN": sqltypes.BOOLEAN,
- "CHAR": sqltypes.CHAR,
- "DATE": sqltypes.DATE,
- "DATE_CHAR": sqltypes.DATE,
- "DATETIME": sqltypes.DATETIME,
- "DATETIME_CHAR": sqltypes.DATETIME,
- "DOUBLE": sqltypes.DOUBLE,
- "DECIMAL": sqltypes.DECIMAL,
- "FLOAT": sqltypes.FLOAT,
- "INT": sqltypes.INTEGER,
- "INTEGER": sqltypes.INTEGER,
- "JSON": JSON,
- "NUMERIC": sqltypes.NUMERIC,
- "REAL": sqltypes.REAL,
- "SMALLINT": sqltypes.SMALLINT,
- "TEXT": sqltypes.TEXT,
- "TIME": sqltypes.TIME,
- "TIME_CHAR": sqltypes.TIME,
- "TIMESTAMP": sqltypes.TIMESTAMP,
- "VARCHAR": sqltypes.VARCHAR,
- "NVARCHAR": sqltypes.NVARCHAR,
- "NCHAR": sqltypes.NCHAR,
- }
- class SQLiteCompiler(compiler.SQLCompiler):
- extract_map = util.update_copy(
- compiler.SQLCompiler.extract_map,
- {
- "month": "%m",
- "day": "%d",
- "year": "%Y",
- "second": "%S",
- "hour": "%H",
- "doy": "%j",
- "minute": "%M",
- "epoch": "%s",
- "dow": "%w",
- "week": "%W",
- },
- )
- def visit_truediv_binary(self, binary, operator, **kw):
- return (
- self.process(binary.left, **kw)
- + " / "
- + "(%s + 0.0)" % self.process(binary.right, **kw)
- )
- def visit_now_func(self, fn, **kw):
- return "CURRENT_TIMESTAMP"
- def visit_localtimestamp_func(self, func, **kw):
- return "DATETIME(CURRENT_TIMESTAMP, 'localtime')"
- def visit_true(self, expr, **kw):
- return "1"
- def visit_false(self, expr, **kw):
- return "0"
- def visit_char_length_func(self, fn, **kw):
- return "length%s" % self.function_argspec(fn)
- def visit_aggregate_strings_func(self, fn, **kw):
- return "group_concat%s" % self.function_argspec(fn)
- def visit_cast(self, cast, **kwargs):
- if self.dialect.supports_cast:
- return super().visit_cast(cast, **kwargs)
- else:
- return self.process(cast.clause, **kwargs)
- def visit_extract(self, extract, **kw):
- try:
- return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
- self.extract_map[extract.field],
- self.process(extract.expr, **kw),
- )
- except KeyError as err:
- raise exc.CompileError(
- "%s is not a valid extract argument." % extract.field
- ) from err
- def returning_clause(
- self,
- stmt,
- returning_cols,
- *,
- populate_result_map,
- **kw,
- ):
- kw["include_table"] = False
- return super().returning_clause(
- stmt, returning_cols, populate_result_map=populate_result_map, **kw
- )
- def limit_clause(self, select, **kw):
- text = ""
- if select._limit_clause is not None:
- text += "\n LIMIT " + self.process(select._limit_clause, **kw)
- if select._offset_clause is not None:
- if select._limit_clause is None:
- text += "\n LIMIT " + self.process(sql.literal(-1))
- text += " OFFSET " + self.process(select._offset_clause, **kw)
- else:
- text += " OFFSET " + self.process(sql.literal(0), **kw)
- return text
- def for_update_clause(self, select, **kw):
- # sqlite has no "FOR UPDATE" AFAICT
- return ""
- def update_from_clause(
- self, update_stmt, from_table, extra_froms, from_hints, **kw
- ):
- kw["asfrom"] = True
- return "FROM " + ", ".join(
- t._compiler_dispatch(self, fromhints=from_hints, **kw)
- for t in extra_froms
- )
- def visit_is_distinct_from_binary(self, binary, operator, **kw):
- return "%s IS NOT %s" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw),
- )
- def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
- return "%s IS %s" % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw),
- )
- def visit_json_getitem_op_binary(self, binary, operator, **kw):
- if binary.type._type_affinity is sqltypes.JSON:
- expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
- else:
- expr = "JSON_EXTRACT(%s, %s)"
- return expr % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw),
- )
- def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
- if binary.type._type_affinity is sqltypes.JSON:
- expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
- else:
- expr = "JSON_EXTRACT(%s, %s)"
- return expr % (
- self.process(binary.left, **kw),
- self.process(binary.right, **kw),
- )
- def visit_empty_set_op_expr(self, type_, expand_op, **kw):
- # slightly older SQLite versions can't handle the default empty-set
- # implementation, so fall back to the explicit form below
- return self.visit_empty_set_expr(type_)
- def visit_empty_set_expr(self, element_types, **kw):
- return "SELECT %s FROM (SELECT %s) WHERE 1!=1" % (
- ", ".join("1" for type_ in element_types or [INTEGER()]),
- ", ".join("1" for type_ in element_types or [INTEGER()]),
- )
- def visit_regexp_match_op_binary(self, binary, operator, **kw):
- return self._generate_generic_binary(binary, " REGEXP ", **kw)
- def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
- return self._generate_generic_binary(binary, " NOT REGEXP ", **kw)
- def _on_conflict_target(self, clause, **kw):
- if clause.inferred_target_elements is not None:
- target_text = "(%s)" % ", ".join(
- (
- self.preparer.quote(c)
- if isinstance(c, str)
- else self.process(c, include_table=False, use_schema=False)
- )
- for c in clause.inferred_target_elements
- )
- if clause.inferred_target_whereclause is not None:
- target_text += " WHERE %s" % self.process(
- clause.inferred_target_whereclause,
- include_table=False,
- use_schema=False,
- literal_execute=True,
- )
- else:
- target_text = ""
- return target_text
- def visit_on_conflict_do_nothing(self, on_conflict, **kw):
- target_text = self._on_conflict_target(on_conflict, **kw)
- if target_text:
- return "ON CONFLICT %s DO NOTHING" % target_text
- else:
- return "ON CONFLICT DO NOTHING"
- def visit_on_conflict_do_update(self, on_conflict, **kw):
- clause = on_conflict
- target_text = self._on_conflict_target(on_conflict, **kw)
- action_set_ops = []
- set_parameters = dict(clause.update_values_to_set)
- # create a list of column assignment clauses as tuples
- insert_statement = self.stack[-1]["selectable"]
- cols = insert_statement.table.c
- for c in cols:
- col_key = c.key
- if col_key in set_parameters:
- value = set_parameters.pop(col_key)
- elif c in set_parameters:
- value = set_parameters.pop(c)
- else:
- continue
- if coercions._is_literal(value):
- value = elements.BindParameter(None, value, type_=c.type)
- else:
- if (
- isinstance(value, elements.BindParameter)
- and value.type._isnull
- ):
- value = value._clone()
- value.type = c.type
- value_text = self.process(value.self_group(), use_schema=False)
- key_text = self.preparer.quote(c.name)
- action_set_ops.append("%s = %s" % (key_text, value_text))
- # check for names that don't match columns
- if set_parameters:
- util.warn(
- "Additional column names not matching "
- "any column keys in table '%s': %s"
- % (
- self.current_executable.table.name,
- (", ".join("'%s'" % c for c in set_parameters)),
- )
- )
- for k, v in set_parameters.items():
- key_text = (
- self.preparer.quote(k)
- if isinstance(k, str)
- else self.process(k, use_schema=False)
- )
- value_text = self.process(
- coercions.expect(roles.ExpressionElementRole, v),
- use_schema=False,
- )
- action_set_ops.append("%s = %s" % (key_text, value_text))
- action_text = ", ".join(action_set_ops)
- if clause.update_whereclause is not None:
- action_text += " WHERE %s" % self.process(
- clause.update_whereclause, include_table=True, use_schema=False
- )
- return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
- def visit_bitwise_xor_op_binary(self, binary, operator, **kw):
- # sqlite has no xor. Use "a XOR b" = "(a | b) - (a & b)".
- kw["eager_grouping"] = True
- or_ = self._generate_generic_binary(binary, " | ", **kw)
- and_ = self._generate_generic_binary(binary, " & ", **kw)
- return f"({or_} - {and_})"
- class SQLiteDDLCompiler(compiler.DDLCompiler):
- def get_column_specification(self, column, **kwargs):
- coltype = self.dialect.type_compiler_instance.process(
- column.type, type_expression=column
- )
- colspec = self.preparer.format_column(column) + " " + coltype
- default = self.get_column_default_string(column)
- if default is not None:
- if not re.match(r"""^\s*[\'\"\(]""", default) and re.match(
- r".*\W.*", default
- ):
- colspec += f" DEFAULT ({default})"
- else:
- colspec += f" DEFAULT {default}"
- if not column.nullable:
- colspec += " NOT NULL"
- on_conflict_clause = column.dialect_options["sqlite"][
- "on_conflict_not_null"
- ]
- if on_conflict_clause is not None:
- colspec += " ON CONFLICT " + on_conflict_clause
- if column.primary_key:
- if (
- column.autoincrement is True
- and len(column.table.primary_key.columns) != 1
- ):
- raise exc.CompileError(
- "SQLite does not support autoincrement for "
- "composite primary keys"
- )
- if (
- column.table.dialect_options["sqlite"]["autoincrement"]
- and len(column.table.primary_key.columns) == 1
- and issubclass(column.type._type_affinity, sqltypes.Integer)
- and not column.foreign_keys
- ):
- colspec += " PRIMARY KEY"
- on_conflict_clause = column.dialect_options["sqlite"][
- "on_conflict_primary_key"
- ]
- if on_conflict_clause is not None:
- colspec += " ON CONFLICT " + on_conflict_clause
- colspec += " AUTOINCREMENT"
- if column.computed is not None:
- colspec += " " + self.process(column.computed)
- return colspec
- def visit_primary_key_constraint(self, constraint, **kw):
- # for columns with sqlite_autoincrement=True,
- # the PRIMARY KEY constraint can only be inline
- # with the column itself.
- if len(constraint.columns) == 1:
- c = list(constraint)[0]
- if (
- c.primary_key
- and c.table.dialect_options["sqlite"]["autoincrement"]
- and issubclass(c.type._type_affinity, sqltypes.Integer)
- and not c.foreign_keys
- ):
- return None
- text = super().visit_primary_key_constraint(constraint)
- on_conflict_clause = constraint.dialect_options["sqlite"][
- "on_conflict"
- ]
- if on_conflict_clause is None and len(constraint.columns) == 1:
- on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
- "on_conflict_primary_key"
- ]
- if on_conflict_clause is not None:
- text += " ON CONFLICT " + on_conflict_clause
- return text
- def visit_unique_constraint(self, constraint, **kw):
- text = super().visit_unique_constraint(constraint)
- on_conflict_clause = constraint.dialect_options["sqlite"][
- "on_conflict"
- ]
- if on_conflict_clause is None and len(constraint.columns) == 1:
- col1 = list(constraint)[0]
- if isinstance(col1, schema.SchemaItem):
- on_conflict_clause = list(constraint)[0].dialect_options[
- "sqlite"
- ]["on_conflict_unique"]
- if on_conflict_clause is not None:
- text += " ON CONFLICT " + on_conflict_clause
- return text
- def visit_check_constraint(self, constraint, **kw):
- text = super().visit_check_constraint(constraint)
- on_conflict_clause = constraint.dialect_options["sqlite"][
- "on_conflict"
- ]
- if on_conflict_clause is not None:
- text += " ON CONFLICT " + on_conflict_clause
- return text
- def visit_column_check_constraint(self, constraint, **kw):
- text = super().visit_column_check_constraint(constraint)
- if constraint.dialect_options["sqlite"]["on_conflict"] is not None:
- raise exc.CompileError(
- "SQLite does not support on conflict clause for "
- "column check constraint"
- )
- return text
- def visit_foreign_key_constraint(self, constraint, **kw):
- local_table = constraint.elements[0].parent.table
- remote_table = constraint.elements[0].column.table
- if local_table.schema != remote_table.schema:
- return None
- else:
- return super().visit_foreign_key_constraint(constraint)
- def define_constraint_remote_table(self, constraint, table, preparer):
- """Format the remote table clause of a CREATE CONSTRAINT clause."""
- return preparer.format_table(table, use_schema=False)
- def visit_create_index(
- self, create, include_schema=False, include_table_schema=True, **kw
- ):
- index = create.element
- self._verify_index_table(index)
- preparer = self.preparer
- text = "CREATE "
- if index.unique:
- text += "UNIQUE "
- text += "INDEX "
- if create.if_not_exists:
- text += "IF NOT EXISTS "
- text += "%s ON %s (%s)" % (
- self._prepared_index_name(index, include_schema=True),
- preparer.format_table(index.table, use_schema=False),
- ", ".join(
- self.sql_compiler.process(
- expr, include_table=False, literal_binds=True
- )
- for expr in index.expressions
- ),
- )
- whereclause = index.dialect_options["sqlite"]["where"]
- if whereclause is not None:
- where_compiled = self.sql_compiler.process(
- whereclause, include_table=False, literal_binds=True
- )
- text += " WHERE " + where_compiled
- return text
- def post_create_table(self, table):
- table_options = []
- if not table.dialect_options["sqlite"]["with_rowid"]:
- table_options.append("WITHOUT ROWID")
- if table.dialect_options["sqlite"]["strict"]:
- table_options.append("STRICT")
- if table_options:
- return "\n " + ",\n ".join(table_options)
- else:
- return ""
- class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
- def visit_large_binary(self, type_, **kw):
- return self.visit_BLOB(type_)
- def visit_DATETIME(self, type_, **kw):
- if (
- not isinstance(type_, _DateTimeMixin)
- or type_.format_is_text_affinity
- ):
- return super().visit_DATETIME(type_)
- else:
- return "DATETIME_CHAR"
- def visit_DATE(self, type_, **kw):
- if (
- not isinstance(type_, _DateTimeMixin)
- or type_.format_is_text_affinity
- ):
- return super().visit_DATE(type_)
- else:
- return "DATE_CHAR"
- def visit_TIME(self, type_, **kw):
- if (
- not isinstance(type_, _DateTimeMixin)
- or type_.format_is_text_affinity
- ):
- return super().visit_TIME(type_)
- else:
- return "TIME_CHAR"
- def visit_JSON(self, type_, **kw):
- # note this name provides NUMERIC affinity, not TEXT.
- # should not be an issue unless the JSON value consists of a single
- # numeric value. JSONTEXT can be used if this case is required.
- return "JSON"
- class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
- reserved_words = {
- "add",
- "after",
- "all",
- "alter",
- "analyze",
- "and",
- "as",
- "asc",
- "attach",
- "autoincrement",
- "before",
- "begin",
- "between",
- "by",
- "cascade",
- "case",
- "cast",
- "check",
- "collate",
- "column",
- "commit",
- "conflict",
- "constraint",
- "create",
- "cross",
- "current_date",
- "current_time",
- "current_timestamp",
- "database",
- "default",
- "deferrable",
- "deferred",
- "delete",
- "desc",
- "detach",
- "distinct",
- "drop",
- "each",
- "else",
- "end",
- "escape",
- "except",
- "exclusive",
- "exists",
- "explain",
- "false",
- "fail",
- "for",
- "foreign",
- "from",
- "full",
- "glob",
- "group",
- "having",
- "if",
- "ignore",
- "immediate",
- "in",
- "index",
- "indexed",
- "initially",
- "inner",
- "insert",
- "instead",
- "intersect",
- "into",
- "is",
- "isnull",
- "join",
- "key",
- "left",
- "like",
- "limit",
- "match",
- "natural",
- "not",
- "notnull",
- "null",
- "of",
- "offset",
- "on",
- "or",
- "order",
- "outer",
- "plan",
- "pragma",
- "primary",
- "query",
- "raise",
- "references",
- "reindex",
- "rename",
- "replace",
- "restrict",
- "right",
- "rollback",
- "row",
- "select",
- "set",
- "table",
- "temp",
- "temporary",
- "then",
- "to",
- "transaction",
- "trigger",
- "true",
- "union",
- "unique",
- "update",
- "using",
- "vacuum",
- "values",
- "view",
- "virtual",
- "when",
- "where",
- }
- class SQLiteExecutionContext(default.DefaultExecutionContext):
- @util.memoized_property
- def _preserve_raw_colnames(self):
- return (
- not self.dialect._broken_dotted_colnames
- or self.execution_options.get("sqlite_raw_colnames", False)
- )
- def _translate_colname(self, colname):
- # TODO: detect SQLite version 3.10.0 or greater;
- # see [ticket:3633]
- # adjust for dotted column names. SQLite
- # in the case of UNION may store col names as
- # "tablename.colname", or if using an attached database,
- # "database.tablename.colname", in cursor.description
- if not self._preserve_raw_colnames and "." in colname:
- return colname.split(".")[-1], colname
- else:
- return colname, None
- class SQLiteDialect(default.DefaultDialect):
- name = "sqlite"
- supports_alter = False
- # SQLite supports "DEFAULT VALUES" but *does not* support
- # "VALUES (DEFAULT)"; the NULL metavalue token configured below
- # provides equivalent behavior
- supports_default_values = True
- # CPython sqlite3 driver issue:
- # https://github.com/python/cpython/issues/93421
- # note this parameter is no longer used by the ORM or default dialect
- # see #9414
- supports_sane_rowcount_returning = False
- supports_empty_insert = False
- supports_cast = True
- supports_multivalues_insert = True
- use_insertmanyvalues = True
- tuple_in_values = True
- supports_statement_cache = True
- insert_null_pk_still_autoincrements = True
- insert_returning = True
- update_returning = True
- update_returning_multifrom = True
- delete_returning = True
- supports_default_metavalue = True
- """dialect supports INSERT... VALUES (DEFAULT) syntax"""
- default_metavalue_token = "NULL"
- """for INSERT... VALUES (DEFAULT) syntax, the token to put in the
- parenthesis."""
- default_paramstyle = "qmark"
- execution_ctx_cls = SQLiteExecutionContext
- statement_compiler = SQLiteCompiler
- ddl_compiler = SQLiteDDLCompiler
- type_compiler_cls = SQLiteTypeCompiler
- preparer = SQLiteIdentifierPreparer
- ischema_names = ischema_names
- colspecs = colspecs
- construct_arguments = [
- (
- sa_schema.Table,
- {
- "autoincrement": False,
- "with_rowid": True,
- "strict": False,
- },
- ),
- (sa_schema.Index, {"where": None}),
- (
- sa_schema.Column,
- {
- "on_conflict_primary_key": None,
- "on_conflict_not_null": None,
- "on_conflict_unique": None,
- },
- ),
- (sa_schema.Constraint, {"on_conflict": None}),
- ]
- _broken_fk_pragma_quotes = False
- _broken_dotted_colnames = False
- @util.deprecated_params(
- _json_serializer=(
- "1.3.7",
- "The _json_serializer argument to the SQLite dialect has "
- "been renamed to the correct name of json_serializer. The old "
- "argument name will be removed in a future release.",
- ),
- _json_deserializer=(
- "1.3.7",
- "The _json_deserializer argument to the SQLite dialect has "
- "been renamed to the correct name of json_deserializer. The old "
- "argument name will be removed in a future release.",
- ),
- )
- def __init__(
- self,
- native_datetime=False,
- json_serializer=None,
- json_deserializer=None,
- _json_serializer=None,
- _json_deserializer=None,
- **kwargs,
- ):
- default.DefaultDialect.__init__(self, **kwargs)
- if _json_serializer:
- json_serializer = _json_serializer
- if _json_deserializer:
- json_deserializer = _json_deserializer
- self._json_serializer = json_serializer
- self._json_deserializer = json_deserializer
- # this flag is used by the pysqlite dialect, and perhaps others in
- # the future, to indicate that the driver itself handles date/timestamp
- # conversions (and perhaps datetime/time as well, on some hypothetical
- # driver)
- self.native_datetime = native_datetime
- if self.dbapi is not None:
- if self.dbapi.sqlite_version_info < (3, 7, 16):
- util.warn(
- "SQLite version %s is older than 3.7.16, and will not "
- "support right nested joins, as are sometimes used in "
- "more complex ORM scenarios. SQLAlchemy 1.4 and above "
- "no longer tries to rewrite these joins."
- % (self.dbapi.sqlite_version_info,)
- )
- # NOTE: python 3.7 on fedora for me has SQLite 3.34.1. These
- # version checks are getting very stale.
- self._broken_dotted_colnames = self.dbapi.sqlite_version_info < (
- 3,
- 10,
- 0,
- )
- self.supports_default_values = self.dbapi.sqlite_version_info >= (
- 3,
- 3,
- 8,
- )
- self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3)
- self.supports_multivalues_insert = (
- # https://www.sqlite.org/releaselog/3_7_11.html
- self.dbapi.sqlite_version_info
- >= (3, 7, 11)
- )
- # see https://www.sqlalchemy.org/trac/ticket/2568
- # as well as https://www.sqlite.org/src/info/600482d161
- self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < (
- 3,
- 6,
- 14,
- )
- if self.dbapi.sqlite_version_info < (3, 35) or util.pypy:
- self.update_returning = self.delete_returning = (
- self.insert_returning
- ) = False
- if self.dbapi.sqlite_version_info < (3, 32, 0):
- # https://www.sqlite.org/limits.html
- self.insertmanyvalues_max_parameters = 999
- _isolation_lookup = util.immutabledict(
- {"READ UNCOMMITTED": 1, "SERIALIZABLE": 0}
- )
- def get_isolation_level_values(self, dbapi_connection):
- return list(self._isolation_lookup)
- def set_isolation_level(self, dbapi_connection, level):
- isolation_level = self._isolation_lookup[level]
- cursor = dbapi_connection.cursor()
- cursor.execute(f"PRAGMA read_uncommitted = {isolation_level}")
- cursor.close()
- def get_isolation_level(self, dbapi_connection):
- cursor = dbapi_connection.cursor()
- cursor.execute("PRAGMA read_uncommitted")
- res = cursor.fetchone()
- if res:
- value = res[0]
- else:
- # https://www.sqlite.org/changes.html#version_3_3_3
- # "Optional READ UNCOMMITTED isolation (instead of the
- # default isolation level of SERIALIZABLE) and
- # table level locking when database connections
- # share a common cache."
- # pre-3.3.0 SQLite defaults to 0
- value = 0
- cursor.close()
- if value == 0:
- return "SERIALIZABLE"
- elif value == 1:
- return "READ UNCOMMITTED"
- else:
- assert False, "Unknown isolation level %s" % value
- @reflection.cache
- def get_schema_names(self, connection, **kw):
- s = "PRAGMA database_list"
- dl = connection.exec_driver_sql(s)
- return [db[1] for db in dl if db[1] != "temp"]
- def _format_schema(self, schema, table_name):
- if schema is not None:
- qschema = self.identifier_preparer.quote_identifier(schema)
- name = f"{qschema}.{table_name}"
- else:
- name = table_name
- return name
- def _sqlite_main_query(
- self,
- table: str,
- type_: str,
- schema: Optional[str],
- sqlite_include_internal: bool,
- ):
- main = self._format_schema(schema, table)
- if not sqlite_include_internal:
- filter_table = " AND name NOT LIKE 'sqlite~_%' ESCAPE '~'"
- else:
- filter_table = ""
- query = (
- f"SELECT name FROM {main} "
- f"WHERE type='{type_}'{filter_table} "
- "ORDER BY name"
- )
- return query
- @reflection.cache
- def get_table_names(
- self, connection, schema=None, sqlite_include_internal=False, **kw
- ):
- query = self._sqlite_main_query(
- "sqlite_master", "table", schema, sqlite_include_internal
- )
- names = connection.exec_driver_sql(query).scalars().all()
- return names
- @reflection.cache
- def get_temp_table_names(
- self, connection, sqlite_include_internal=False, **kw
- ):
- query = self._sqlite_main_query(
- "sqlite_temp_master", "table", None, sqlite_include_internal
- )
- names = connection.exec_driver_sql(query).scalars().all()
- return names
- @reflection.cache
- def get_temp_view_names(
- self, connection, sqlite_include_internal=False, **kw
- ):
- query = self._sqlite_main_query(
- "sqlite_temp_master", "view", None, sqlite_include_internal
- )
- names = connection.exec_driver_sql(query).scalars().all()
- return names
- @reflection.cache
- def has_table(self, connection, table_name, schema=None, **kw):
- self._ensure_has_table_connection(connection)
- if schema is not None and schema not in self.get_schema_names(
- connection, **kw
- ):
- return False
- info = self._get_table_pragma(
- connection, "table_info", table_name, schema=schema
- )
- return bool(info)
- def _get_default_schema_name(self, connection):
- return "main"
- @reflection.cache
- def get_view_names(
- self, connection, schema=None, sqlite_include_internal=False, **kw
- ):
- query = self._sqlite_main_query(
- "sqlite_master", "view", schema, sqlite_include_internal
- )
- names = connection.exec_driver_sql(query).scalars().all()
- return names
- @reflection.cache
- def get_view_definition(self, connection, view_name, schema=None, **kw):
- if schema is not None:
- qschema = self.identifier_preparer.quote_identifier(schema)
- master = f"{qschema}.sqlite_master"
- s = ("SELECT sql FROM %s WHERE name = ? AND type='view'") % (
- master,
- )
- rs = connection.exec_driver_sql(s, (view_name,))
- else:
- try:
- s = (
- "SELECT sql FROM "
- " (SELECT * FROM sqlite_master UNION ALL "
- " SELECT * FROM sqlite_temp_master) "
- "WHERE name = ? "
- "AND type='view'"
- )
- rs = connection.exec_driver_sql(s, (view_name,))
- except exc.DBAPIError:
- s = (
- "SELECT sql FROM sqlite_master WHERE name = ? "
- "AND type='view'"
- )
- rs = connection.exec_driver_sql(s, (view_name,))
- result = rs.fetchall()
- if result:
- return result[0].sql
- else:
- raise exc.NoSuchTableError(
- f"{schema}.{view_name}" if schema else view_name
- )
- @reflection.cache
- def get_columns(self, connection, table_name, schema=None, **kw):
- pragma = "table_info"
- # computed columns are treated as hidden; they require table_xinfo
- if self.server_version_info >= (3, 31):
- pragma = "table_xinfo"
- info = self._get_table_pragma(
- connection, pragma, table_name, schema=schema
- )
- columns = []
- tablesql = None
- for row in info:
- name = row[1]
- type_ = row[2].upper()
- nullable = not row[3]
- default = row[4]
- primary_key = row[5]
- hidden = row[6] if pragma == "table_xinfo" else 0
- # hidden has value 0 for normal columns, 1 for hidden columns,
- # 2 for computed virtual columns and 3 for computed stored columns
- # https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b
- if hidden == 1:
- continue
- generated = bool(hidden)
- persisted = hidden == 3
- if tablesql is None and generated:
- tablesql = self._get_table_sql(
- connection, table_name, schema, **kw
- )
- # remove create table
- match = re.match(
- r"create table .*?\((.*)\)$",
- tablesql.strip(),
- re.DOTALL | re.IGNORECASE,
- )
- assert match, f"create table not found in {tablesql}"
- tablesql = match.group(1).strip()
- columns.append(
- self._get_column_info(
- name,
- type_,
- nullable,
- default,
- primary_key,
- generated,
- persisted,
- tablesql,
- )
- )
- if columns:
- return columns
- elif not self.has_table(connection, table_name, schema):
- raise exc.NoSuchTableError(
- f"{schema}.{table_name}" if schema else table_name
- )
- else:
- return ReflectionDefaults.columns()
- def _get_column_info(
- self,
- name,
- type_,
- nullable,
- default,
- primary_key,
- generated,
- persisted,
- tablesql,
- ):
- if generated:
- # the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)"
- # is reported by the pragma as "INTEGER GENERATED ALWAYS"
- type_ = re.sub("generated", "", type_, flags=re.IGNORECASE)
- type_ = re.sub("always", "", type_, flags=re.IGNORECASE).strip()
- coltype = self._resolve_type_affinity(type_)
- if default is not None:
- default = str(default)
- colspec = {
- "name": name,
- "type": coltype,
- "nullable": nullable,
- "default": default,
- "primary_key": primary_key,
- }
- if generated:
- sqltext = ""
- if tablesql:
- pattern = (
- r"[^,]*\s+GENERATED\s+ALWAYS\s+AS"
- r"\s+\((.*)\)\s*(?:virtual|stored)?"
- )
- match = re.search(
- re.escape(name) + pattern, tablesql, re.IGNORECASE
- )
- if match:
- sqltext = match.group(1)
- colspec["computed"] = {"sqltext": sqltext, "persisted": persisted}
- return colspec
- def _resolve_type_affinity(self, type_):
- """Return a data type from a reflected column, using affinity rules.
- SQLite's goal for universal compatibility introduces some complexity
- during reflection, as a column's defined type might not actually be a
- type that SQLite understands - or indeed, may not be defined *at all*.
- Internally, SQLite handles this with a 'data type affinity' for each
- column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
- 'REAL', or 'NONE' (raw bits). The algorithm that determines this is
- listed in https://www.sqlite.org/datatype3.html section 2.1.
- This method allows SQLAlchemy to support that algorithm, while still
- providing access to smarter reflection utilities by recognizing
- column definitions that SQLite only supports through affinity (like
- DATE and DOUBLE).
- """
- match = re.match(r"([\w ]+)(\(.*?\))?", type_)
- if match:
- coltype = match.group(1)
- args = match.group(2)
- else:
- coltype = ""
- args = ""
- if coltype in self.ischema_names:
- coltype = self.ischema_names[coltype]
- elif "INT" in coltype:
- coltype = sqltypes.INTEGER
- elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype:
- coltype = sqltypes.TEXT
- elif "BLOB" in coltype or not coltype:
- coltype = sqltypes.NullType
- elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype:
- coltype = sqltypes.REAL
- else:
- coltype = sqltypes.NUMERIC
- if args is not None:
- args = re.findall(r"(\d+)", args)
- try:
- coltype = coltype(*[int(a) for a in args])
- except TypeError:
- util.warn(
- "Could not instantiate type %s with "
- "reflected arguments %s; using no arguments."
- % (coltype, args)
- )
- coltype = coltype()
- else:
- coltype = coltype()
- return coltype
- @reflection.cache
- def get_pk_constraint(self, connection, table_name, schema=None, **kw):
- constraint_name = None
- table_data = self._get_table_sql(connection, table_name, schema=schema)
- if table_data:
- PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY"
- result = re.search(PK_PATTERN, table_data, re.I)
- constraint_name = result.group(1) if result else None
- cols = self.get_columns(connection, table_name, schema, **kw)
- # consider only pk columns. This also avoids sorting the cached
- # value returned by get_columns
- cols = [col for col in cols if col.get("primary_key", 0) > 0]
- cols.sort(key=lambda col: col.get("primary_key"))
- pkeys = [col["name"] for col in cols]
- if pkeys:
- return {"constrained_columns": pkeys, "name": constraint_name}
- else:
- return ReflectionDefaults.pk_constraint()
- @reflection.cache
- def get_foreign_keys(self, connection, table_name, schema=None, **kw):
- # SQLite makes reflecting foreign keys *extremely difficult*.
- # First, use the pragma to get the actual FKs.
- pragma_fks = self._get_table_pragma(
- connection, "foreign_key_list", table_name, schema=schema
- )
- fks = {}
- for row in pragma_fks:
- (numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
- if not rcol:
- # no referred column, which means it was not named in the
- # original DDL. The referred columns of the foreign key
- # constraint are therefore the primary key of the referred
- # table.
- try:
- referred_pk = self.get_pk_constraint(
- connection, rtbl, schema=schema, **kw
- )
- referred_columns = referred_pk["constrained_columns"]
- except exc.NoSuchTableError:
- # ignore not existing parents
- referred_columns = []
- else:
- # note we use this list only if this is the first column
- # in the constraint. for subsequent columns we ignore the
- # list and append "rcol" if present.
- referred_columns = []
- if self._broken_fk_pragma_quotes:
- rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl)
- if numerical_id in fks:
- fk = fks[numerical_id]
- else:
- fk = fks[numerical_id] = {
- "name": None,
- "constrained_columns": [],
- "referred_schema": schema,
- "referred_table": rtbl,
- "referred_columns": referred_columns,
- "options": {},
- }
- fk["constrained_columns"].append(lcol)
- if rcol:
- fk["referred_columns"].append(rcol)
- def fk_sig(constrained_columns, referred_table, referred_columns):
- return (
- tuple(constrained_columns)
- + (referred_table,)
- + tuple(referred_columns)
- )
- # then, parse the actual SQL and attempt to find DDL that matches
- # the names as well. SQLite saves the DDL exactly as it was typed
- # in, so we need to be liberal here.
- keys_by_signature = {
- fk_sig(
- fk["constrained_columns"],
- fk["referred_table"],
- fk["referred_columns"],
- ): fk
- for fk in fks.values()
- }
- table_data = self._get_table_sql(connection, table_name, schema=schema)
- def parse_fks():
- if table_data is None:
- # system tables, etc.
- return
- # note that we already have the FKs from PRAGMA above. This whole
- # regexp thing is trying to locate additional detail about the
- # FKs, namely the name of the constraint and other options.
- # so parsing the columns is really about matching it up to what
- # we already have.
- FK_PATTERN = (
- r"(?:CONSTRAINT (\w+) +)?"
- r"FOREIGN KEY *\( *(.+?) *\) +"
- r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\( *((?:(?:"[^"]+"|[a-z0-9_]+) *(?:, *)?)+)\) *' # noqa: E501
- r"((?:ON (?:DELETE|UPDATE) "
- r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)"
- r"((?:NOT +)?DEFERRABLE)?"
- r"(?: +INITIALLY +(DEFERRED|IMMEDIATE))?"
- )
- for match in re.finditer(FK_PATTERN, table_data, re.I):
- (
- constraint_name,
- constrained_columns,
- referred_quoted_name,
- referred_name,
- referred_columns,
- onupdatedelete,
- deferrable,
- initially,
- ) = match.group(1, 2, 3, 4, 5, 6, 7, 8)
- constrained_columns = list(
- self._find_cols_in_sig(constrained_columns)
- )
- if not referred_columns:
- referred_columns = constrained_columns
- else:
- referred_columns = list(
- self._find_cols_in_sig(referred_columns)
- )
- referred_name = referred_quoted_name or referred_name
- options = {}
- for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
- if token.startswith("DELETE"):
- ondelete = token[6:].strip()
- if ondelete and ondelete != "NO ACTION":
- options["ondelete"] = ondelete
- elif token.startswith("UPDATE"):
- onupdate = token[6:].strip()
- if onupdate and onupdate != "NO ACTION":
- options["onupdate"] = onupdate
- if deferrable:
- options["deferrable"] = "NOT" not in deferrable.upper()
- if initially:
- options["initially"] = initially.upper()
- yield (
- constraint_name,
- constrained_columns,
- referred_name,
- referred_columns,
- options,
- )
- fkeys = []
- for (
- constraint_name,
- constrained_columns,
- referred_name,
- referred_columns,
- options,
- ) in parse_fks():
- sig = fk_sig(constrained_columns, referred_name, referred_columns)
- if sig not in keys_by_signature:
- util.warn(
- "WARNING: SQL-parsed foreign key constraint "
- "'%s' could not be located in PRAGMA "
- "foreign_keys for table %s" % (sig, table_name)
- )
- continue
- key = keys_by_signature.pop(sig)
- key["name"] = constraint_name
- key["options"] = options
- fkeys.append(key)
- # assume the remainders are the unnamed, inline constraints, just
- # use them as is as it's extremely difficult to parse inline
- # constraints
- fkeys.extend(keys_by_signature.values())
- if fkeys:
- return fkeys
- else:
- return ReflectionDefaults.foreign_keys()
- def _find_cols_in_sig(self, sig):
- for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
- yield match.group(1) or match.group(2)
- @reflection.cache
- def get_unique_constraints(
- self, connection, table_name, schema=None, **kw
- ):
- auto_index_by_sig = {}
- for idx in self.get_indexes(
- connection,
- table_name,
- schema=schema,
- include_auto_indexes=True,
- **kw,
- ):
- if not idx["name"].startswith("sqlite_autoindex"):
- continue
- sig = tuple(idx["column_names"])
- auto_index_by_sig[sig] = idx
- table_data = self._get_table_sql(
- connection, table_name, schema=schema, **kw
- )
- unique_constraints = []
- def parse_uqs():
- if table_data is None:
- return
- UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
- INLINE_UNIQUE_PATTERN = (
- r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?)[\t ]'
- r"+[a-z0-9_ ]+?[\t ]+UNIQUE"
- )
- for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
- name, cols = match.group(1, 2)
- yield name, list(self._find_cols_in_sig(cols))
- # we need to match inlines as well, as we seek to differentiate
- # a UNIQUE constraint from a UNIQUE INDEX, even though these
- # are kind of the same thing :)
- for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
- cols = list(
- self._find_cols_in_sig(match.group(1) or match.group(2))
- )
- yield None, cols
- for name, cols in parse_uqs():
- sig = tuple(cols)
- if sig in auto_index_by_sig:
- auto_index_by_sig.pop(sig)
- parsed_constraint = {"name": name, "column_names": cols}
- unique_constraints.append(parsed_constraint)
- # NOTE: auto_index_by_sig might not be empty here,
- # the PRIMARY KEY may have an entry.
- if unique_constraints:
- return unique_constraints
- else:
- return ReflectionDefaults.unique_constraints()
- @reflection.cache
- def get_check_constraints(self, connection, table_name, schema=None, **kw):
- table_data = self._get_table_sql(
- connection, table_name, schema=schema, **kw
- )
- # NOTE NOTE NOTE
- # DO NOT CHANGE THIS REGULAR EXPRESSION. There is no known way
- # to parse CHECK constraints that contain newlines themselves using
- # regular expressions, and the approach here relies upon each
- # individual
- # CHECK constraint being on a single line by itself. This
- # necessarily makes assumptions as to how the CREATE TABLE
- # was emitted. A more comprehensive DDL parsing solution would be
- # needed to improve upon the current situation. See #11840 for
- # background
- CHECK_PATTERN = r"(?:CONSTRAINT (.+) +)?CHECK *\( *(.+) *\),? *"
- cks = []
- for match in re.finditer(CHECK_PATTERN, table_data or "", re.I):
- name = match.group(1)
- if name:
- name = re.sub(r'^"|"$', "", name)
- cks.append({"sqltext": match.group(2), "name": name})
- cks.sort(key=lambda d: d["name"] or "~") # sort None as last
- if cks:
- return cks
- else:
- return ReflectionDefaults.check_constraints()
- @reflection.cache
- def get_indexes(self, connection, table_name, schema=None, **kw):
- pragma_indexes = self._get_table_pragma(
- connection, "index_list", table_name, schema=schema
- )
- indexes = []
- # regular expression to extract the filter predicate of a partial
- # index. this could fail to extract the predicate correctly on
- # indexes created like
- # CREATE INDEX i ON t (col || ') where') WHERE col <> ''
- # but as this function does not support expression-based indexes
- # this case does not occur.
- partial_pred_re = re.compile(r"\)\s+where\s+(.+)", re.IGNORECASE)
- if schema:
- schema_expr = "%s." % self.identifier_preparer.quote_identifier(
- schema
- )
- else:
- schema_expr = ""
- include_auto_indexes = kw.pop("include_auto_indexes", False)
- for row in pragma_indexes:
- # ignore implicit primary key index.
- # https://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
- if not include_auto_indexes and row[1].startswith(
- "sqlite_autoindex"
- ):
- continue
- indexes.append(
- dict(
- name=row[1],
- column_names=[],
- unique=row[2],
- dialect_options={},
- )
- )
- # check partial indexes
- if len(row) >= 5 and row[4]:
- s = (
- "SELECT sql FROM %(schema)ssqlite_master "
- "WHERE name = ? "
- "AND type = 'index'" % {"schema": schema_expr}
- )
- rs = connection.exec_driver_sql(s, (row[1],))
- index_sql = rs.scalar()
- predicate_match = partial_pred_re.search(index_sql)
- if predicate_match is None:
- # unless the regex is broken this case shouldn't happen
- # because we know this is a partial index, so the
- # definition sql should match the regex
- util.warn(
- "Failed to look up filter predicate of "
- "partial index %s" % row[1]
- )
- else:
- predicate = predicate_match.group(1)
- indexes[-1]["dialect_options"]["sqlite_where"] = text(
- predicate
- )
- # loop through the collected indexes to get the column names.
- for idx in list(indexes):
- pragma_index = self._get_table_pragma(
- connection, "index_info", idx["name"], schema=schema
- )
- for row in pragma_index:
- if row[2] is None:
- util.warn(
- "Skipped unsupported reflection of "
- "expression-based index %s" % idx["name"]
- )
- indexes.remove(idx)
- break
- else:
- idx["column_names"].append(row[2])
- indexes.sort(key=lambda d: d["name"] or "~") # sort None as last
- if indexes:
- return indexes
- elif not self.has_table(connection, table_name, schema):
- raise exc.NoSuchTableError(
- f"{schema}.{table_name}" if schema else table_name
- )
- else:
- return ReflectionDefaults.indexes()
- def _is_sys_table(self, table_name):
- return table_name in {
- "sqlite_schema",
- "sqlite_master",
- "sqlite_temp_schema",
- "sqlite_temp_master",
- }
- @reflection.cache
- def _get_table_sql(self, connection, table_name, schema=None, **kw):
- if schema:
- schema_expr = "%s." % (
- self.identifier_preparer.quote_identifier(schema)
- )
- else:
- schema_expr = ""
- try:
- s = (
- "SELECT sql FROM "
- " (SELECT * FROM %(schema)ssqlite_master UNION ALL "
- " SELECT * FROM %(schema)ssqlite_temp_master) "
- "WHERE name = ? "
- "AND type in ('table', 'view')" % {"schema": schema_expr}
- )
- rs = connection.exec_driver_sql(s, (table_name,))
- except exc.DBAPIError:
- s = (
- "SELECT sql FROM %(schema)ssqlite_master "
- "WHERE name = ? "
- "AND type in ('table', 'view')" % {"schema": schema_expr}
- )
- rs = connection.exec_driver_sql(s, (table_name,))
- value = rs.scalar()
- if value is None and not self._is_sys_table(table_name):
- raise exc.NoSuchTableError(f"{schema_expr}{table_name}")
- return value
- def _get_table_pragma(self, connection, pragma, table_name, schema=None):
- quote = self.identifier_preparer.quote_identifier
- if schema is not None:
- statements = [f"PRAGMA {quote(schema)}."]
- else:
- # because PRAGMA looks in all attached databases if no schema
- # given, need to specify "main" schema, however since we want
- # 'temp' tables in the same namespace as 'main', need to run
- # the PRAGMA twice
- statements = ["PRAGMA main.", "PRAGMA temp."]
- qtable = quote(table_name)
- for statement in statements:
- statement = f"{statement}{pragma}({qtable})"
- cursor = connection.exec_driver_sql(statement)
- if not cursor._soft_closed:
- # work around SQLite issue whereby cursor.description
- # is blank when PRAGMA returns no rows:
- # https://www.sqlite.org/cvstrac/tktview?tn=1884
- result = cursor.fetchall()
- else:
- result = []
- if result:
- return result
- else:
- return []
|