asyncpg.py 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293
  1. # dialects/postgresql/asyncpg.py
  2. # Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
  3. # file>
  4. #
  5. # This module is part of SQLAlchemy and is released under
  6. # the MIT License: https://www.opensource.org/licenses/mit-license.php
  7. # mypy: ignore-errors
  8. r"""
  9. .. dialect:: postgresql+asyncpg
  10. :name: asyncpg
  11. :dbapi: asyncpg
  12. :connectstring: postgresql+asyncpg://user:password@host:port/dbname[?key=value&key=value...]
  13. :url: https://magicstack.github.io/asyncpg/
  14. The asyncpg dialect is SQLAlchemy's first Python asyncio dialect.
  15. Using a special asyncio mediation layer, the asyncpg dialect is usable
  16. as the backend for the :ref:`SQLAlchemy asyncio <asyncio_toplevel>`
  17. extension package.
  18. This dialect should normally be used only with the
  19. :func:`_asyncio.create_async_engine` engine creation function::
  20. from sqlalchemy.ext.asyncio import create_async_engine
  21. engine = create_async_engine(
  22. "postgresql+asyncpg://user:pass@hostname/dbname"
  23. )
  24. .. versionadded:: 1.4
  25. .. note::
  26. By default asyncpg does not decode the ``json`` and ``jsonb`` types and
  27. returns them as strings. SQLAlchemy sets default type decoder for ``json``
  28. and ``jsonb`` types using the python builtin ``json.loads`` function.
  29. The json implementation used can be changed by setting the attribute
  30. ``json_deserializer`` when creating the engine with
  31. :func:`create_engine` or :func:`create_async_engine`.
  32. .. _asyncpg_multihost:
  33. Multihost Connections
  34. --------------------------
  35. The asyncpg dialect features support for multiple fallback hosts in the
  36. same way as that of the psycopg2 and psycopg dialects. The
  37. syntax is the same,
  38. using ``host=<host>:<port>`` combinations as additional query string arguments;
  39. however, there is no default port, so all hosts must have a complete port number
  40. present, otherwise an exception is raised::
  41. engine = create_async_engine(
  42. "postgresql+asyncpg://user:password@/dbname?host=HostA:5432&host=HostB:5432&host=HostC:5432"
  43. )
  44. For complete background on this syntax, see :ref:`psycopg2_multi_host`.
  45. .. versionadded:: 2.0.18
  46. .. seealso::
  47. :ref:`psycopg2_multi_host`
  48. .. _asyncpg_prepared_statement_cache:
  49. Prepared Statement Cache
  50. --------------------------
  51. The asyncpg SQLAlchemy dialect makes use of ``asyncpg.connection.prepare()``
  52. for all statements. The prepared statement objects are cached after
  53. construction which appears to grant a 10% or more performance improvement for
  54. statement invocation. The cache is on a per-DBAPI connection basis, which
  55. means that the primary storage for prepared statements is within DBAPI
  56. connections pooled within the connection pool. The size of this cache
  57. defaults to 100 statements per DBAPI connection and may be adjusted using the
  58. ``prepared_statement_cache_size`` DBAPI argument (note that while this argument
  59. is implemented by SQLAlchemy, it is part of the DBAPI emulation portion of the
  60. asyncpg dialect, therefore is handled as a DBAPI argument, not a dialect
  61. argument)::
  62. engine = create_async_engine(
  63. "postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=500"
  64. )
  65. To disable the prepared statement cache, use a value of zero::
  66. engine = create_async_engine(
  67. "postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=0"
  68. )
  69. .. versionadded:: 1.4.0b2 Added ``prepared_statement_cache_size`` for asyncpg.
  70. .. warning:: The ``asyncpg`` database driver necessarily uses caches for
  71. PostgreSQL type OIDs, which become stale when custom PostgreSQL datatypes
  72. such as ``ENUM`` objects are changed via DDL operations. Additionally,
  73. prepared statements themselves which are optionally cached by SQLAlchemy's
  74. driver as described above may also become "stale" when DDL has been emitted
  75. to the PostgreSQL database which modifies the tables or other objects
  76. involved in a particular prepared statement.
  77. The SQLAlchemy asyncpg dialect will invalidate these caches within its local
  78. process when statements that represent DDL are emitted on a local
  79. connection, but this is only controllable within a single Python process /
  80. database engine. If DDL changes are made from other database engines
  81. and/or processes, a running application may encounter asyncpg exceptions
  82. ``InvalidCachedStatementError`` and/or ``InternalServerError("cache lookup
  83. failed for type <oid>")`` if it refers to pooled database connections which
  84. operated upon the previous structures. The SQLAlchemy asyncpg dialect will
  85. recover from these error cases when the driver raises these exceptions by
  86. clearing its internal caches as well as those of the asyncpg driver in
  87. response to them, but cannot prevent them from being raised in the first
  88. place if the cached prepared statement or asyncpg type caches have gone
  89. stale, nor can it retry the statement as the PostgreSQL transaction is
  90. invalidated when these errors occur.
  91. .. _asyncpg_prepared_statement_name:
  92. Prepared Statement Name with PGBouncer
  93. --------------------------------------
  94. By default, asyncpg enumerates prepared statements in numeric order, which
  95. can lead to errors if a name has already been taken for another prepared
  96. statement. This issue can arise if your application uses database proxies
  97. such as PgBouncer to handle connections. One possible workaround is to
  98. use dynamic prepared statement names, which asyncpg now supports through
  99. an optional ``name`` value for the statement name. This allows you to
  100. generate your own unique names that won't conflict with existing ones.
  101. To achieve this, you can provide a function that will be called every time
  102. a prepared statement is prepared::
  103. from uuid import uuid4
  104. engine = create_async_engine(
  105. "postgresql+asyncpg://user:pass@somepgbouncer/dbname",
  106. poolclass=NullPool,
  107. connect_args={
  108. "prepared_statement_name_func": lambda: f"__asyncpg_{uuid4()}__",
  109. },
  110. )
  111. .. seealso::
  112. https://github.com/MagicStack/asyncpg/issues/837
  113. https://github.com/sqlalchemy/sqlalchemy/issues/6467
  114. .. warning:: When using PGBouncer, to prevent a buildup of useless prepared statements in
  115. your application, it's important to use the :class:`.NullPool` pool
  116. class, and to configure PgBouncer to use `DISCARD <https://www.postgresql.org/docs/current/sql-discard.html>`_
  117. when returning connections. The DISCARD command is used to release resources held by the db connection,
  118. including prepared statements. Without proper setup, prepared statements can
  119. accumulate quickly and cause performance issues.
  120. Disabling the PostgreSQL JIT to improve ENUM datatype handling
  121. ---------------------------------------------------------------
  122. Asyncpg has an `issue <https://github.com/MagicStack/asyncpg/issues/727>`_ when
  123. using PostgreSQL ENUM datatypes, where upon the creation of new database
  124. connections, an expensive query may be emitted in order to retrieve metadata
  125. regarding custom types which has been shown to negatively affect performance.
  126. To mitigate this issue, the PostgreSQL "jit" setting may be disabled from the
  127. client using this setting passed to :func:`_asyncio.create_async_engine`::
  128. engine = create_async_engine(
  129. "postgresql+asyncpg://user:password@localhost/tmp",
  130. connect_args={"server_settings": {"jit": "off"}},
  131. )
  132. .. seealso::
  133. https://github.com/MagicStack/asyncpg/issues/727
  134. """ # noqa
  135. from __future__ import annotations
  136. from collections import deque
  137. import decimal
  138. import json as _py_json
  139. import re
  140. import time
  141. from . import json
  142. from . import ranges
  143. from .array import ARRAY as PGARRAY
  144. from .base import _DECIMAL_TYPES
  145. from .base import _FLOAT_TYPES
  146. from .base import _INT_TYPES
  147. from .base import ENUM
  148. from .base import INTERVAL
  149. from .base import OID
  150. from .base import PGCompiler
  151. from .base import PGDialect
  152. from .base import PGExecutionContext
  153. from .base import PGIdentifierPreparer
  154. from .base import REGCLASS
  155. from .base import REGCONFIG
  156. from .types import BIT
  157. from .types import BYTEA
  158. from .types import CITEXT
  159. from ... import exc
  160. from ... import pool
  161. from ... import util
  162. from ...engine import AdaptedConnection
  163. from ...engine import processors
  164. from ...sql import sqltypes
  165. from ...util.concurrency import asyncio
  166. from ...util.concurrency import await_fallback
  167. from ...util.concurrency import await_only
# The subclasses below exist so that these types render an explicit bind
# cast (e.g. ``$1::VARCHAR``) in compiled SQL; ``render_bind_cast`` is the
# flag the PG compiler consults for this.  asyncpg's prepared statements
# need the parameter types to be unambiguous, hence the casts.


class AsyncpgARRAY(PGARRAY):
    render_bind_cast = True


class AsyncpgString(sqltypes.String):
    render_bind_cast = True


class AsyncpgREGCONFIG(REGCONFIG):
    render_bind_cast = True


class AsyncpgTime(sqltypes.Time):
    render_bind_cast = True


class AsyncpgBit(BIT):
    render_bind_cast = True


class AsyncpgByteA(BYTEA):
    render_bind_cast = True


class AsyncpgDate(sqltypes.Date):
    render_bind_cast = True


class AsyncpgDateTime(sqltypes.DateTime):
    render_bind_cast = True


class AsyncpgBoolean(sqltypes.Boolean):
    render_bind_cast = True


class AsyncPgInterval(INTERVAL):
    render_bind_cast = True

    @classmethod
    def adapt_emulated_to_native(cls, interval, **kw):
        # adapt a generic Interval to the native PG INTERVAL, carrying
        # over only the seconds precision
        return AsyncPgInterval(precision=interval.second_precision)


class AsyncPgEnum(ENUM):
    render_bind_cast = True


class AsyncpgInteger(sqltypes.Integer):
    render_bind_cast = True


class AsyncpgSmallInteger(sqltypes.SmallInteger):
    render_bind_cast = True


class AsyncpgBigInteger(sqltypes.BigInteger):
    render_bind_cast = True
class AsyncpgJSON(json.JSON):
    def result_processor(self, dialect, coltype):
        # no per-row processing: per the module docstring, decoding of
        # ``json`` values is installed as a driver-level type codec using
        # the configured ``json_deserializer``
        return None


class AsyncpgJSONB(json.JSONB):
    def result_processor(self, dialect, coltype):
        # same as AsyncpgJSON: ``jsonb`` decoding happens in the driver
        # codec, not in a SQLAlchemy result processor
        return None
class AsyncpgJSONIndexType(sqltypes.JSON.JSONIndexType):
    # generic JSON index; no asyncpg-specific behavior needed
    pass


class AsyncpgJSONIntIndexType(sqltypes.JSON.JSONIntIndexType):
    # distinct visit name so the compiler can render an integer-specific
    # bind cast for ``col[1]``-style JSON indexing
    __visit_name__ = "json_int_index"

    render_bind_cast = True


class AsyncpgJSONStrIndexType(sqltypes.JSON.JSONStrIndexType):
    # distinct visit name so the compiler can render a string-specific
    # bind cast for ``col['key']``-style JSON indexing
    __visit_name__ = "json_str_index"

    render_bind_cast = True
  213. class AsyncpgJSONPathType(json.JSONPathType):
  214. def bind_processor(self, dialect):
  215. def process(value):
  216. if isinstance(value, str):
  217. # If it's already a string assume that it's in json path
  218. # format. This allows using cast with json paths literals
  219. return value
  220. elif value:
  221. tokens = [str(elem) for elem in value]
  222. return tokens
  223. else:
  224. return []
  225. return process
  226. class AsyncpgNumeric(sqltypes.Numeric):
  227. render_bind_cast = True
  228. def bind_processor(self, dialect):
  229. return None
  230. def result_processor(self, dialect, coltype):
  231. if self.asdecimal:
  232. if coltype in _FLOAT_TYPES:
  233. return processors.to_decimal_processor_factory(
  234. decimal.Decimal, self._effective_decimal_return_scale
  235. )
  236. elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
  237. # pg8000 returns Decimal natively for 1700
  238. return None
  239. else:
  240. raise exc.InvalidRequestError(
  241. "Unknown PG numeric type: %d" % coltype
  242. )
  243. else:
  244. if coltype in _FLOAT_TYPES:
  245. # pg8000 returns float natively for 701
  246. return None
  247. elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
  248. return processors.to_float
  249. else:
  250. raise exc.InvalidRequestError(
  251. "Unknown PG numeric type: %d" % coltype
  252. )
class AsyncpgFloat(AsyncpgNumeric, sqltypes.Float):
    # shares AsyncpgNumeric's result handling, but keeps the "float"
    # visit name so the compiler renders FLOAT rather than NUMERIC
    __visit_name__ = "float"
    render_bind_cast = True


class AsyncpgREGCLASS(REGCLASS):
    render_bind_cast = True


class AsyncpgOID(OID):
    render_bind_cast = True


class AsyncpgCHAR(sqltypes.CHAR):
    render_bind_cast = True
  262. class _AsyncpgRange(ranges.AbstractSingleRangeImpl):
  263. def bind_processor(self, dialect):
  264. asyncpg_Range = dialect.dbapi.asyncpg.Range
  265. def to_range(value):
  266. if isinstance(value, ranges.Range):
  267. value = asyncpg_Range(
  268. value.lower,
  269. value.upper,
  270. lower_inc=value.bounds[0] == "[",
  271. upper_inc=value.bounds[1] == "]",
  272. empty=value.empty,
  273. )
  274. return value
  275. return to_range
  276. def result_processor(self, dialect, coltype):
  277. def to_range(value):
  278. if value is not None:
  279. empty = value.isempty
  280. value = ranges.Range(
  281. value.lower,
  282. value.upper,
  283. bounds=f"{'[' if empty or value.lower_inc else '('}" # type: ignore # noqa: E501
  284. f"{']' if not empty and value.upper_inc else ')'}",
  285. empty=empty,
  286. )
  287. return value
  288. return to_range
  289. class _AsyncpgMultiRange(ranges.AbstractMultiRangeImpl):
  290. def bind_processor(self, dialect):
  291. asyncpg_Range = dialect.dbapi.asyncpg.Range
  292. NoneType = type(None)
  293. def to_range(value):
  294. if isinstance(value, (str, NoneType)):
  295. return value
  296. def to_range(value):
  297. if isinstance(value, ranges.Range):
  298. value = asyncpg_Range(
  299. value.lower,
  300. value.upper,
  301. lower_inc=value.bounds[0] == "[",
  302. upper_inc=value.bounds[1] == "]",
  303. empty=value.empty,
  304. )
  305. return value
  306. return [to_range(element) for element in value]
  307. return to_range
  308. def result_processor(self, dialect, coltype):
  309. def to_range_array(value):
  310. def to_range(rvalue):
  311. if rvalue is not None:
  312. empty = rvalue.isempty
  313. rvalue = ranges.Range(
  314. rvalue.lower,
  315. rvalue.upper,
  316. bounds=f"{'[' if empty or rvalue.lower_inc else '('}" # type: ignore # noqa: E501
  317. f"{']' if not empty and rvalue.upper_inc else ')'}",
  318. empty=empty,
  319. )
  320. return rvalue
  321. if value is not None:
  322. value = ranges.MultiRange(to_range(elem) for elem in value)
  323. return value
  324. return to_range_array
  325. class PGExecutionContext_asyncpg(PGExecutionContext):
  326. def handle_dbapi_exception(self, e):
  327. if isinstance(
  328. e,
  329. (
  330. self.dialect.dbapi.InvalidCachedStatementError,
  331. self.dialect.dbapi.InternalServerError,
  332. ),
  333. ):
  334. self.dialect._invalidate_schema_cache()
  335. def pre_exec(self):
  336. if self.isddl:
  337. self.dialect._invalidate_schema_cache()
  338. self.cursor._invalidate_schema_cache_asof = (
  339. self.dialect._invalidate_schema_cache_asof
  340. )
  341. if not self.compiled:
  342. return
  343. def create_server_side_cursor(self):
  344. return self._dbapi_connection.cursor(server_side=True)
class PGCompiler_asyncpg(PGCompiler):
    # no asyncpg-specific SQL compilation changes required
    pass


class PGIdentifierPreparer_asyncpg(PGIdentifierPreparer):
    # identifier quoting follows the base PostgreSQL rules unchanged
    pass
  349. class AsyncAdapt_asyncpg_cursor:
  350. __slots__ = (
  351. "_adapt_connection",
  352. "_connection",
  353. "_rows",
  354. "description",
  355. "arraysize",
  356. "rowcount",
  357. "_cursor",
  358. "_invalidate_schema_cache_asof",
  359. )
  360. server_side = False
  361. def __init__(self, adapt_connection):
  362. self._adapt_connection = adapt_connection
  363. self._connection = adapt_connection._connection
  364. self._rows = deque()
  365. self._cursor = None
  366. self.description = None
  367. self.arraysize = 1
  368. self.rowcount = -1
  369. self._invalidate_schema_cache_asof = 0
  370. def close(self):
  371. self._rows.clear()
  372. def _handle_exception(self, error):
  373. self._adapt_connection._handle_exception(error)
  374. async def _prepare_and_execute(self, operation, parameters):
  375. adapt_connection = self._adapt_connection
  376. async with adapt_connection._execute_mutex:
  377. if not adapt_connection._started:
  378. await adapt_connection._start_transaction()
  379. if parameters is None:
  380. parameters = ()
  381. try:
  382. prepared_stmt, attributes = await adapt_connection._prepare(
  383. operation, self._invalidate_schema_cache_asof
  384. )
  385. if attributes:
  386. self.description = [
  387. (
  388. attr.name,
  389. attr.type.oid,
  390. None,
  391. None,
  392. None,
  393. None,
  394. None,
  395. )
  396. for attr in attributes
  397. ]
  398. else:
  399. self.description = None
  400. if self.server_side:
  401. self._cursor = await prepared_stmt.cursor(*parameters)
  402. self.rowcount = -1
  403. else:
  404. self._rows = deque(await prepared_stmt.fetch(*parameters))
  405. status = prepared_stmt.get_statusmsg()
  406. reg = re.match(
  407. r"(?:SELECT|UPDATE|DELETE|INSERT \d+) (\d+)",
  408. status or "",
  409. )
  410. if reg:
  411. self.rowcount = int(reg.group(1))
  412. else:
  413. self.rowcount = -1
  414. except Exception as error:
  415. self._handle_exception(error)
  416. async def _executemany(self, operation, seq_of_parameters):
  417. adapt_connection = self._adapt_connection
  418. self.description = None
  419. async with adapt_connection._execute_mutex:
  420. await adapt_connection._check_type_cache_invalidation(
  421. self._invalidate_schema_cache_asof
  422. )
  423. if not adapt_connection._started:
  424. await adapt_connection._start_transaction()
  425. try:
  426. return await self._connection.executemany(
  427. operation, seq_of_parameters
  428. )
  429. except Exception as error:
  430. self._handle_exception(error)
  431. def execute(self, operation, parameters=None):
  432. self._adapt_connection.await_(
  433. self._prepare_and_execute(operation, parameters)
  434. )
  435. def executemany(self, operation, seq_of_parameters):
  436. return self._adapt_connection.await_(
  437. self._executemany(operation, seq_of_parameters)
  438. )
  439. def setinputsizes(self, *inputsizes):
  440. raise NotImplementedError()
  441. def __iter__(self):
  442. while self._rows:
  443. yield self._rows.popleft()
  444. def fetchone(self):
  445. if self._rows:
  446. return self._rows.popleft()
  447. else:
  448. return None
  449. def fetchmany(self, size=None):
  450. if size is None:
  451. size = self.arraysize
  452. rr = self._rows
  453. return [rr.popleft() for _ in range(min(size, len(rr)))]
  454. def fetchall(self):
  455. retval = list(self._rows)
  456. self._rows.clear()
  457. return retval
class AsyncAdapt_asyncpg_ss_cursor(AsyncAdapt_asyncpg_cursor):
    """Server-side cursor variant: rows are pulled from the asyncpg
    cursor in batches of 50 on demand, rather than fetched all at once
    as in the base class."""

    server_side = True
    __slots__ = ("_rowbuffer",)

    def __init__(self, adapt_connection):
        super().__init__(adapt_connection)
        # rows already fetched from the server-side cursor but not yet
        # consumed by the caller
        self._rowbuffer = deque()

    def close(self):
        self._cursor = None
        self._rowbuffer.clear()

    def _buffer_rows(self):
        # fetch the next batch of (up to) 50 rows into the local buffer
        assert self._cursor is not None
        new_rows = self._adapt_connection.await_(self._cursor.fetch(50))
        self._rowbuffer.extend(new_rows)

    def __aiter__(self):
        return self

    async def __anext__(self):
        # NOTE(review): the ``yield`` makes this an async *generator*
        # function, so calling __anext__() returns a generator object
        # rather than awaiting a single row — presumably consumers
        # iterate that generator; confirm against the async-iteration
        # call sites before changing.
        while True:
            while self._rowbuffer:
                yield self._rowbuffer.popleft()

            self._buffer_rows()
            if not self._rowbuffer:
                break

    def fetchone(self):
        if not self._rowbuffer:
            # buffer exhausted; try to pull another batch
            self._buffer_rows()
            if not self._rowbuffer:
                return None
        return self._rowbuffer.popleft()

    def fetchmany(self, size=None):
        if size is None:
            return self.fetchall()

        if not self._rowbuffer:
            self._buffer_rows()

        assert self._cursor is not None
        rb = self._rowbuffer
        lb = len(rb)
        if size > lb:
            # top up the buffer so up to ``size`` rows can be returned
            rb.extend(
                self._adapt_connection.await_(self._cursor.fetch(size - lb))
            )

        return [rb.popleft() for _ in range(min(size, len(rb)))]

    def fetchall(self):
        # everything already buffered, plus all remaining server rows
        ret = list(self._rowbuffer)
        ret.extend(self._adapt_connection.await_(self._all()))
        self._rowbuffer.clear()
        return ret

    async def _all(self):
        rows = []

        # TODO: looks like we have to hand-roll some kind of batching here.
        # hardcoding for the moment but this should be improved.
        while True:
            batch = await self._cursor.fetch(1000)
            if batch:
                rows.extend(batch)
                continue
            else:
                break
        return rows

    def executemany(self, operation, seq_of_parameters):
        raise NotImplementedError(
            "server side cursor doesn't support executemany yet"
        )
class AsyncAdapt_asyncpg_connection(AdaptedConnection):
    """Adapts an asyncpg connection to a synchronous DBAPI-style
    connection, including transaction state, a per-connection prepared
    statement cache, and translation of asyncpg exceptions into the
    emulated DBAPI exception hierarchy."""

    __slots__ = (
        "dbapi",
        "isolation_level",
        "_isolation_setting",
        "readonly",
        "deferrable",
        "_transaction",
        "_started",
        "_prepared_statement_cache",
        "_prepared_statement_name_func",
        "_invalidate_schema_cache_asof",
        "_execute_mutex",
    )

    # bridges the async driver calls into the greenlet-mediated
    # synchronous facade
    await_ = staticmethod(await_only)

    def __init__(
        self,
        dbapi,
        connection,
        prepared_statement_cache_size=100,
        prepared_statement_name_func=None,
    ):
        self.dbapi = dbapi
        self._connection = connection
        self.isolation_level = self._isolation_setting = None
        self.readonly = False
        self.deferrable = False
        self._transaction = None
        self._started = False
        self._invalidate_schema_cache_asof = time.time()
        self._execute_mutex = asyncio.Lock()

        # a cache size of 0 (or None) disables prepared statement caching
        if prepared_statement_cache_size:
            self._prepared_statement_cache = util.LRUCache(
                prepared_statement_cache_size
            )
        else:
            self._prepared_statement_cache = None

        # optional user hook to generate unique prepared statement names
        # (e.g. for PGBouncer); defaults to letting asyncpg pick the name
        if prepared_statement_name_func:
            self._prepared_statement_name_func = prepared_statement_name_func
        else:
            self._prepared_statement_name_func = self._default_name_func

    async def _check_type_cache_invalidation(self, invalidate_timestamp):
        # if another connection/engine invalidated the schema cache more
        # recently than we last reloaded, refresh asyncpg's type caches
        if invalidate_timestamp > self._invalidate_schema_cache_asof:
            await self._connection.reload_schema_state()
            self._invalidate_schema_cache_asof = invalidate_timestamp

    async def _prepare(self, operation, invalidate_timestamp):
        """Return a (prepared statement, attributes) pair for
        ``operation``, using the LRU cache when enabled."""
        await self._check_type_cache_invalidation(invalidate_timestamp)

        cache = self._prepared_statement_cache
        if cache is None:
            prepared_stmt = await self._connection.prepare(
                operation, name=self._prepared_statement_name_func()
            )
            attributes = prepared_stmt.get_attributes()
            return prepared_stmt, attributes

        # asyncpg uses a type cache for the "attributes" which seems to go
        # stale independently of the PreparedStatement itself, so place that
        # collection in the cache as well.
        if operation in cache:
            prepared_stmt, attributes, cached_timestamp = cache[operation]

            # preparedstatements themselves also go stale for certain DDL
            # changes such as size of a VARCHAR changing, so there is also
            # a cross-connection invalidation timestamp
            if cached_timestamp > invalidate_timestamp:
                return prepared_stmt, attributes

        prepared_stmt = await self._connection.prepare(
            operation, name=self._prepared_statement_name_func()
        )
        attributes = prepared_stmt.get_attributes()
        cache[operation] = (prepared_stmt, attributes, time.time())

        return prepared_stmt, attributes

    def _handle_exception(self, error):
        # reset local transaction state if the connection died underneath
        if self._connection.is_closed():
            self._transaction = None
            self._started = False

        if not isinstance(error, AsyncAdapt_asyncpg_dbapi.Error):
            # translate a raw asyncpg exception into the emulated DBAPI
            # hierarchy by walking the exception's MRO
            exception_mapping = self.dbapi._asyncpg_error_translate

            for super_ in type(error).__mro__:
                if super_ in exception_mapping:
                    translated_error = exception_mapping[super_](
                        "%s: %s" % (type(error), error)
                    )
                    translated_error.pgcode = translated_error.sqlstate = (
                        getattr(error, "sqlstate", None)
                    )
                    raise translated_error from error
            else:
                # no mapping found; propagate as-is
                raise error
        else:
            raise error

    @property
    def autocommit(self):
        return self.isolation_level == "autocommit"

    @autocommit.setter
    def autocommit(self, value):
        if value:
            self.isolation_level = "autocommit"
        else:
            # restore the isolation level that was set before autocommit
            self.isolation_level = self._isolation_setting

    def ping(self):
        try:
            _ = self.await_(self._async_ping())
        except Exception as error:
            self._handle_exception(error)

    async def _async_ping(self):
        if self._transaction is None and self.isolation_level != "autocommit":
            # create a transaction explicitly to support pgbouncer
            # transaction mode.  See #10226
            tr = self._connection.transaction()
            await tr.start()
            try:
                await self._connection.fetchrow(";")
            finally:
                await tr.rollback()
        else:
            await self._connection.fetchrow(";")

    def set_isolation_level(self, level):
        if self._started:
            self.rollback()
        self.isolation_level = self._isolation_setting = level

    async def _start_transaction(self):
        if self.isolation_level == "autocommit":
            # autocommit mode never opens an explicit transaction
            return

        try:
            self._transaction = self._connection.transaction(
                isolation=self.isolation_level,
                readonly=self.readonly,
                deferrable=self.deferrable,
            )
            await self._transaction.start()
        except Exception as error:
            self._handle_exception(error)
        else:
            self._started = True

    def cursor(self, server_side=False):
        if server_side:
            return AsyncAdapt_asyncpg_ss_cursor(self)
        else:
            return AsyncAdapt_asyncpg_cursor(self)

    async def _rollback_and_discard(self):
        try:
            await self._transaction.rollback()
        finally:
            # if asyncpg .rollback() was actually called, then whether or
            # not it raised or succeeded, the transaction is done, discard it
            self._transaction = None
            self._started = False

    async def _commit_and_discard(self):
        try:
            await self._transaction.commit()
        finally:
            # if asyncpg .commit() was actually called, then whether or
            # not it raised or succeeded, the transaction is done, discard it
            self._transaction = None
            self._started = False

    def rollback(self):
        if self._started:
            try:
                self.await_(self._rollback_and_discard())
                self._transaction = None
                self._started = False
            except Exception as error:
                # don't dereference asyncpg transaction if we didn't
                # actually try to call rollback() on it
                self._handle_exception(error)

    def commit(self):
        if self._started:
            try:
                self.await_(self._commit_and_discard())
                self._transaction = None
                self._started = False
            except Exception as error:
                # don't dereference asyncpg transaction if we didn't
                # actually try to call commit() on it
                self._handle_exception(error)

    def close(self):
        self.rollback()
        self.await_(self._connection.close())

    def terminate(self):
        if util.concurrency.in_greenlet():
            # in a greenlet; this is the connection was invalidated
            # case.
            try:
                # try to gracefully close; see #10717
                # timeout added in asyncpg 0.14.0 December 2017
                self.await_(asyncio.shield(self._connection.close(timeout=2)))
            except (
                asyncio.TimeoutError,
                asyncio.CancelledError,
                OSError,
                self.dbapi.asyncpg.PostgresError,
            ) as e:
                # in the case where we are recycling an old connection
                # that may have already been disconnected, close() will
                # fail with the above timeout.  in this case, terminate
                # the connection without any further waiting.
                # see issue #8419
                self._connection.terminate()

                if isinstance(e, asyncio.CancelledError):
                    # re-raise CancelledError if we were cancelled
                    raise
        else:
            # not in a greenlet; this is the gc cleanup case
            self._connection.terminate()
        self._started = False

    @staticmethod
    def _default_name_func():
        # None tells asyncpg to use its own (numeric) statement names
        return None
class AsyncAdaptFallback_asyncpg_connection(AsyncAdapt_asyncpg_connection):
    """Variant of the adapted connection whose ``await_`` runs
    coroutines via the fallback (non-greenlet) strategy, for use when
    no async event loop integration is available.
    """

    __slots__ = ()

    # override the awaiting strategy of the parent class
    await_ = staticmethod(await_fallback)
class AsyncAdapt_asyncpg_dbapi:
    """Minimal pep-249-style DBAPI facade over the asyncpg package.

    Supplies ``connect()``, the pep-249 exception hierarchy, the
    datatype placeholders, and a translation table from asyncpg
    exceptions to the pep-249 classes.
    """

    def __init__(self, asyncpg):
        # the imported ``asyncpg`` module itself
        self.asyncpg = asyncpg
        # asyncpg renders bound parameters as $1, $2, ...
        self.paramstyle = "numeric_dollar"

    def connect(self, *arg, **kw):
        """Create an adapted asyncpg connection.

        Dialect-specific keywords are popped here; all remaining
        arguments are forwarded to the asyncpg connect function.
        """
        async_fallback = kw.pop("async_fallback", False)
        # allow the caller to supply an alternate coroutine factory
        creator_fn = kw.pop("async_creator_fn", self.asyncpg.connect)
        prepared_statement_cache_size = kw.pop(
            "prepared_statement_cache_size", 100
        )
        prepared_statement_name_func = kw.pop(
            "prepared_statement_name_func", None
        )
        if util.asbool(async_fallback):
            # synchronous-fallback flavor for non-asyncio callers
            return AsyncAdaptFallback_asyncpg_connection(
                self,
                await_fallback(creator_fn(*arg, **kw)),
                prepared_statement_cache_size=prepared_statement_cache_size,
                prepared_statement_name_func=prepared_statement_name_func,
            )
        else:
            return AsyncAdapt_asyncpg_connection(
                self,
                await_only(creator_fn(*arg, **kw)),
                prepared_statement_cache_size=prepared_statement_cache_size,
                prepared_statement_name_func=prepared_statement_name_func,
            )

    # pep-249 exception hierarchy

    class Error(Exception):
        pass

    class Warning(Exception):  # noqa
        pass

    class InterfaceError(Error):
        pass

    class DatabaseError(Error):
        pass

    class InternalError(DatabaseError):
        pass

    class OperationalError(DatabaseError):
        pass

    class ProgrammingError(DatabaseError):
        pass

    class IntegrityError(DatabaseError):
        pass

    class DataError(DatabaseError):
        pass

    class NotSupportedError(DatabaseError):
        pass

    class InternalServerError(InternalError):
        pass

    class InvalidCachedStatementError(NotSupportedError):
        # raised when asyncpg's prepared-statement cache is stale; the
        # dialect responds by invalidating all prepared caches
        def __init__(self, message):
            super().__init__(
                message + " (SQLAlchemy asyncpg dialect will now invalidate "
                "all prepared caches in response to this exception)",
            )

    # pep-249 datatype placeholders. As of SQLAlchemy 2.0 these aren't
    # used, however the test suite looks for these in a few cases.
    STRING = util.symbol("STRING")
    NUMBER = util.symbol("NUMBER")
    DATETIME = util.symbol("DATETIME")

    @util.memoized_property
    def _asyncpg_error_translate(self):
        """Mapping of asyncpg exception classes to the pep-249
        classes above; computed once per dbapi instance.
        """
        import asyncpg

        return {
            asyncpg.exceptions.IntegrityConstraintViolationError: self.IntegrityError,  # noqa: E501
            asyncpg.exceptions.PostgresError: self.Error,
            asyncpg.exceptions.SyntaxOrAccessError: self.ProgrammingError,
            asyncpg.exceptions.InterfaceError: self.InterfaceError,
            asyncpg.exceptions.InvalidCachedStatementError: self.InvalidCachedStatementError,  # noqa: E501
            asyncpg.exceptions.InternalServerError: self.InternalServerError,
        }

    def Binary(self, value):
        # pep-249 Binary constructor; asyncpg accepts bytes as-is
        return value
class PGDialect_asyncpg(PGDialect):
    """PostgreSQL dialect implemented on top of the asyncpg driver,
    using the adapted pep-249 facade defined in this module.
    """

    driver = "asyncpg"
    supports_statement_cache = True
    supports_server_side_cursors = True
    # asyncpg requires explicit casts on bound parameters in many cases
    render_bind_cast = True
    has_terminate = True
    default_paramstyle = "numeric_dollar"
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PGExecutionContext_asyncpg
    statement_compiler = PGCompiler_asyncpg
    preparer = PGIdentifierPreparer_asyncpg

    # asyncpg-specific type adaptations layered over the base
    # PostgreSQL colspecs
    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.String: AsyncpgString,
            sqltypes.ARRAY: AsyncpgARRAY,
            BIT: AsyncpgBit,
            CITEXT: CITEXT,
            REGCONFIG: AsyncpgREGCONFIG,
            sqltypes.Time: AsyncpgTime,
            sqltypes.Date: AsyncpgDate,
            sqltypes.DateTime: AsyncpgDateTime,
            sqltypes.Interval: AsyncPgInterval,
            INTERVAL: AsyncPgInterval,
            sqltypes.Boolean: AsyncpgBoolean,
            sqltypes.Integer: AsyncpgInteger,
            sqltypes.SmallInteger: AsyncpgSmallInteger,
            sqltypes.BigInteger: AsyncpgBigInteger,
            sqltypes.Numeric: AsyncpgNumeric,
            sqltypes.Float: AsyncpgFloat,
            sqltypes.JSON: AsyncpgJSON,
            sqltypes.LargeBinary: AsyncpgByteA,
            json.JSONB: AsyncpgJSONB,
            sqltypes.JSON.JSONPathType: AsyncpgJSONPathType,
            sqltypes.JSON.JSONIndexType: AsyncpgJSONIndexType,
            sqltypes.JSON.JSONIntIndexType: AsyncpgJSONIntIndexType,
            sqltypes.JSON.JSONStrIndexType: AsyncpgJSONStrIndexType,
            sqltypes.Enum: AsyncPgEnum,
            OID: AsyncpgOID,
            REGCLASS: AsyncpgREGCLASS,
            sqltypes.CHAR: AsyncpgCHAR,
            ranges.AbstractSingleRange: _AsyncpgRange,
            ranges.AbstractMultiRange: _AsyncpgMultiRange,
        },
    )
    is_async = True
    # timestamp of the most recent schema-cache invalidation
    _invalidate_schema_cache_asof = 0

    def _invalidate_schema_cache(self):
        # record "now" so cached schema state older than this is stale
        self._invalidate_schema_cache_asof = time.time()

    @util.memoized_property
    def _dbapi_version(self):
        """Parse the driver version into an int tuple; fall back to a
        very large sentinel when no version is available.
        """
        if self.dbapi and hasattr(self.dbapi, "__version__"):
            return tuple(
                [
                    int(x)
                    for x in re.findall(
                        r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
                    )
                ]
            )
        else:
            return (99, 99, 99)

    @classmethod
    def import_dbapi(cls):
        # wrap the real asyncpg module in the pep-249 facade
        return AsyncAdapt_asyncpg_dbapi(__import__("asyncpg"))

    @util.memoized_property
    def _isolation_lookup(self):
        # SQLAlchemy isolation-level names -> asyncpg adapter names
        return {
            "AUTOCOMMIT": "autocommit",
            "READ COMMITTED": "read_committed",
            "REPEATABLE READ": "repeatable_read",
            "SERIALIZABLE": "serializable",
        }

    def get_isolation_level_values(self, dbapi_connection):
        return list(self._isolation_lookup)

    def set_isolation_level(self, dbapi_connection, level):
        dbapi_connection.set_isolation_level(self._isolation_lookup[level])

    def detect_autocommit_setting(self, dbapi_conn) -> bool:
        return bool(dbapi_conn.autocommit)

    def set_readonly(self, connection, value):
        connection.readonly = value

    def get_readonly(self, connection):
        return connection.readonly

    def set_deferrable(self, connection, value):
        connection.deferrable = value

    def get_deferrable(self, connection):
        return connection.deferrable

    def do_terminate(self, dbapi_connection) -> None:
        # hard-close; see the adapted connection's terminate()
        dbapi_connection.terminate()

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into asyncpg connect arguments,
        including multi-host URL support.

        :raises: ``exc.ArgumentError`` if a multi-host URL omits any
         host or any port.
        """
        opts = url.translate_connect_args(username="user")
        multihosts, multiports = self._split_multihost_from_url(url)
        opts.update(url.query)
        if multihosts:
            assert multiports
            if len(multihosts) == 1:
                # single host/port: pass as scalars
                opts["host"] = multihosts[0]
                if multiports[0] is not None:
                    opts["port"] = multiports[0]
            elif not all(multihosts):
                raise exc.ArgumentError(
                    "All hosts are required to be present"
                    " for asyncpg multiple host URL"
                )
            elif not all(multiports):
                raise exc.ArgumentError(
                    "All ports are required to be present"
                    " for asyncpg multiple host URL"
                )
            else:
                # asyncpg accepts parallel host/port lists
                opts["host"] = list(multihosts)
                opts["port"] = list(multiports)
        else:
            util.coerce_kw_type(opts, "port", int)
        util.coerce_kw_type(opts, "prepared_statement_cache_size", int)
        return ([], opts)

    def do_ping(self, dbapi_connection):
        # ping() raises on failure; reaching the return means success
        dbapi_connection.ping()
        return True

    @classmethod
    def get_pool_class(cls, url):
        async_fallback = url.query.get("async_fallback", False)
        if util.asbool(async_fallback):
            return pool.FallbackAsyncAdaptedQueuePool
        else:
            return pool.AsyncAdaptedQueuePool

    def is_disconnect(self, e, connection, cursor):
        # with a connection in hand, ask asyncpg directly; otherwise
        # infer from the exception type and message
        if connection:
            return connection._connection.is_closed()
        else:
            return isinstance(
                e, self.dbapi.InterfaceError
            ) and "connection is closed" in str(e)

    async def setup_asyncpg_json_codec(self, conn):
        """set up JSON codec for asyncpg.

        This occurs for all new connections and
        can be overridden by third party dialects.

        .. versionadded:: 1.4.27

        """
        asyncpg_connection = conn._connection
        deserializer = self._json_deserializer or _py_json.loads

        def _json_decoder(bin_value):
            return deserializer(bin_value.decode())

        await asyncpg_connection.set_type_codec(
            "json",
            encoder=str.encode,
            decoder=_json_decoder,
            schema="pg_catalog",
            format="binary",
        )

    async def setup_asyncpg_jsonb_codec(self, conn):
        """set up JSONB codec for asyncpg.

        This occurs for all new connections and
        can be overridden by third party dialects.

        .. versionadded:: 1.4.27

        """
        asyncpg_connection = conn._connection
        deserializer = self._json_deserializer or _py_json.loads

        def _jsonb_encoder(str_value):
            # \x01 is the prefix for jsonb used by PostgreSQL.
            # asyncpg requires it when format='binary'
            return b"\x01" + str_value.encode()

        # NOTE(review): duplicate of the assignment above; harmless
        deserializer = self._json_deserializer or _py_json.loads

        def _jsonb_decoder(bin_value):
            # the byte is the \x01 prefix for jsonb used by PostgreSQL.
            # asyncpg returns it when format='binary'
            return deserializer(bin_value[1:].decode())

        await asyncpg_connection.set_type_codec(
            "jsonb",
            encoder=_jsonb_encoder,
            decoder=_jsonb_decoder,
            schema="pg_catalog",
            format="binary",
        )

    async def _disable_asyncpg_inet_codecs(self, conn):
        # pass inet/cidr values through as plain text rather than
        # letting asyncpg convert them to ipaddress objects
        asyncpg_connection = conn._connection

        await asyncpg_connection.set_type_codec(
            "inet",
            encoder=lambda s: s,
            decoder=lambda s: s,
            schema="pg_catalog",
            format="text",
        )

        await asyncpg_connection.set_type_codec(
            "cidr",
            encoder=lambda s: s,
            decoder=lambda s: s,
            schema="pg_catalog",
            format="text",
        )

    def on_connect(self):
        """on_connect for asyncpg

        A major component of this for asyncpg is to set up type decoders at the
        asyncpg level.

        See https://github.com/MagicStack/asyncpg/issues/623 for
        notes on JSON/JSONB implementation.

        """

        super_connect = super().on_connect()

        def connect(conn):
            conn.await_(self.setup_asyncpg_json_codec(conn))
            conn.await_(self.setup_asyncpg_jsonb_codec(conn))
            if self._native_inet_types is False:
                conn.await_(self._disable_asyncpg_inet_codecs(conn))
            if super_connect is not None:
                super_connect(conn)

        return connect

    def get_driver_connection(self, connection):
        # unwrap the adapter to expose the raw asyncpg connection
        return connection._connection
# module-level entry point consumed by SQLAlchemy's dialect registry
dialect = PGDialect_asyncpg