# dialects/mssql/pyodbc.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
r"""
.. dialect:: mssql+pyodbc
    :name: PyODBC
    :dbapi: pyodbc
    :connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
    :url: https://pypi.org/project/pyodbc/
  9. .. dialect:: mssql+pyodbc
  10. :name: PyODBC
  11. :dbapi: pyodbc
  12. :connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
  13. :url: https://pypi.org/project/pyodbc/
  14. Connecting to PyODBC
  15. --------------------
  16. The URL here is to be translated to PyODBC connection strings, as
  17. detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
  18. DSN Connections
  19. ^^^^^^^^^^^^^^^
  20. A DSN connection in ODBC means that a pre-existing ODBC datasource is
  21. configured on the client machine. The application then specifies the name
  22. of this datasource, which encompasses details such as the specific ODBC driver
  23. in use as well as the network address of the database. Assuming a datasource
  24. is configured on the client, a basic DSN-based connection looks like::
  25. engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
  26. Which above, will pass the following connection string to PyODBC:
  27. .. sourcecode:: text
  28. DSN=some_dsn;UID=scott;PWD=tiger
  29. If the username and password are omitted, the DSN form will also add
  30. the ``Trusted_Connection=yes`` directive to the ODBC string.
  31. Hostname Connections
  32. ^^^^^^^^^^^^^^^^^^^^
  33. Hostname-based connections are also supported by pyodbc. These are often
  34. easier to use than a DSN and have the additional advantage that the specific
  35. database name to connect towards may be specified locally in the URL, rather
  36. than it being fixed as part of a datasource configuration.
  37. When using a hostname connection, the driver name must also be specified in the
  38. query parameters of the URL. As these names usually have spaces in them, the
  39. name must be URL encoded which means using plus signs for spaces::
  40. engine = create_engine(
  41. "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server"
  42. )
  43. The ``driver`` keyword is significant to the pyodbc dialect and must be
  44. specified in lowercase.
  45. Any other names passed in the query string are passed through in the pyodbc
  46. connect string, such as ``authentication``, ``TrustServerCertificate``, etc.
  47. Multiple keyword arguments must be separated by an ampersand (``&``); these
  48. will be translated to semicolons when the pyodbc connect string is generated
  49. internally::
  50. e = create_engine(
  51. "mssql+pyodbc://scott:tiger@mssql2017:1433/test?"
  52. "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
  53. "&authentication=ActiveDirectoryIntegrated"
  54. )
  55. The equivalent URL can be constructed using :class:`_sa.engine.URL`::
  56. from sqlalchemy.engine import URL
  57. connection_url = URL.create(
  58. "mssql+pyodbc",
  59. username="scott",
  60. password="tiger",
  61. host="mssql2017",
  62. port=1433,
  63. database="test",
  64. query={
  65. "driver": "ODBC Driver 18 for SQL Server",
  66. "TrustServerCertificate": "yes",
  67. "authentication": "ActiveDirectoryIntegrated",
  68. },
  69. )
  70. Pass through exact Pyodbc string
  71. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  72. A PyODBC connection string can also be sent in pyodbc's format directly, as
  73. specified in `the PyODBC documentation
  74. <https://github.com/mkleehammer/pyodbc/wiki/Connecting-to-databases>`_,
  75. using the parameter ``odbc_connect``. A :class:`_sa.engine.URL` object
  76. can help make this easier::
  77. from sqlalchemy.engine import URL
  78. connection_string = "DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password"
  79. connection_url = URL.create(
  80. "mssql+pyodbc", query={"odbc_connect": connection_string}
  81. )
  82. engine = create_engine(connection_url)
  83. .. _mssql_pyodbc_access_tokens:
  84. Connecting to databases with access tokens
  85. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  86. Some database servers are set up to only accept access tokens for login. For
  87. example, SQL Server allows the use of Azure Active Directory tokens to connect
  88. to databases. This requires creating a credential object using the
  89. ``azure-identity`` library. More information about the authentication step can be
  90. found in `Microsoft's documentation
  91. <https://docs.microsoft.com/en-us/azure/developer/python/azure-sdk-authenticate?tabs=bash>`_.
  92. After getting an engine, the credentials need to be sent to ``pyodbc.connect``
  93. each time a connection is requested. One way to do this is to set up an event
  94. listener on the engine that adds the credential token to the dialect's connect
  95. call. This is discussed more generally in :ref:`engines_dynamic_tokens`. For
  96. SQL Server in particular, this is passed as an ODBC connection attribute with
  97. a data structure `described by Microsoft
  98. <https://docs.microsoft.com/en-us/sql/connect/odbc/using-azure-active-directory#authenticating-with-an-access-token>`_.
  99. The following code snippet will create an engine that connects to an Azure SQL
  100. database using Azure credentials::
  101. import struct
  102. from sqlalchemy import create_engine, event
  103. from sqlalchemy.engine.url import URL
  104. from azure import identity
  105. # Connection option for access tokens, as defined in msodbcsql.h
  106. SQL_COPT_SS_ACCESS_TOKEN = 1256
  107. TOKEN_URL = "https://database.windows.net/" # The token URL for any Azure SQL database
  108. connection_string = "mssql+pyodbc://@my-server.database.windows.net/myDb?driver=ODBC+Driver+17+for+SQL+Server"
  109. engine = create_engine(connection_string)
  110. azure_credentials = identity.DefaultAzureCredential()
  111. @event.listens_for(engine, "do_connect")
  112. def provide_token(dialect, conn_rec, cargs, cparams):
  113. # remove the "Trusted_Connection" parameter that SQLAlchemy adds
  114. cargs[0] = cargs[0].replace(";Trusted_Connection=Yes", "")
  115. # create token credential
  116. raw_token = azure_credentials.get_token(TOKEN_URL).token.encode(
  117. "utf-16-le"
  118. )
  119. token_struct = struct.pack(
  120. f"<I{len(raw_token)}s", len(raw_token), raw_token
  121. )
  122. # apply it to keyword arguments
  123. cparams["attrs_before"] = {SQL_COPT_SS_ACCESS_TOKEN: token_struct}
  124. .. tip::
  125. The ``Trusted_Connection`` token is currently added by the SQLAlchemy
  126. pyodbc dialect when no username or password is present. This needs
  127. to be removed per Microsoft's
  128. `documentation for Azure access tokens
  129. <https://docs.microsoft.com/en-us/sql/connect/odbc/using-azure-active-directory#authenticating-with-an-access-token>`_,
  130. stating that a connection string when using an access token must not contain
  131. ``UID``, ``PWD``, ``Authentication`` or ``Trusted_Connection`` parameters.
  132. .. _azure_synapse_ignore_no_transaction_on_rollback:
  133. Avoiding transaction-related exceptions on Azure Synapse Analytics
  134. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  135. Azure Synapse Analytics has a significant difference in its transaction
  136. handling compared to plain SQL Server; in some cases an error within a Synapse
  137. transaction can cause it to be arbitrarily terminated on the server side, which
  138. then causes the DBAPI ``.rollback()`` method (as well as ``.commit()``) to
  139. fail. The issue prevents the usual DBAPI contract of allowing ``.rollback()``
  140. to pass silently if no transaction is present as the driver does not expect
  141. this condition. The symptom of this failure is an exception with a message
  142. resembling 'No corresponding transaction found. (111214)' when attempting to
  143. emit a ``.rollback()`` after an operation had a failure of some kind.
  144. This specific case can be handled by passing ``ignore_no_transaction_on_rollback=True`` to
  145. the SQL Server dialect via the :func:`_sa.create_engine` function as follows::
  146. engine = create_engine(
  147. connection_url, ignore_no_transaction_on_rollback=True
  148. )
  149. Using the above parameter, the dialect will catch ``ProgrammingError``
  150. exceptions raised during ``connection.rollback()`` and emit a warning
  151. if the error message contains code ``111214``, however will not raise
  152. an exception.
  153. .. versionadded:: 1.4.40 Added the
  154. ``ignore_no_transaction_on_rollback=True`` parameter.
  155. Enable autocommit for Azure SQL Data Warehouse (DW) connections
  156. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  157. Azure SQL Data Warehouse does not support transactions,
  158. and that can cause problems with SQLAlchemy's "autobegin" (and implicit
  159. commit/rollback) behavior. We can avoid these problems by enabling autocommit
  160. at both the pyodbc and engine levels::
  161. connection_url = sa.engine.URL.create(
  162. "mssql+pyodbc",
  163. username="scott",
  164. password="tiger",
  165. host="dw.azure.example.com",
  166. database="mydb",
  167. query={
  168. "driver": "ODBC Driver 17 for SQL Server",
  169. "autocommit": "True",
  170. },
  171. )
  172. engine = create_engine(connection_url).execution_options(
  173. isolation_level="AUTOCOMMIT"
  174. )
  175. Avoiding sending large string parameters as TEXT/NTEXT
  176. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  177. By default, for historical reasons, Microsoft's ODBC drivers for SQL Server
  178. send long string parameters (greater than 4000 SBCS characters or 2000 Unicode
  179. characters) as TEXT/NTEXT values. TEXT and NTEXT have been deprecated for many
  180. years and are starting to cause compatibility issues with newer versions of
  181. SQL_Server/Azure. For example, see `this
  182. issue <https://github.com/mkleehammer/pyodbc/issues/835>`_.
  183. Starting with ODBC Driver 18 for SQL Server we can override the legacy
  184. behavior and pass long strings as varchar(max)/nvarchar(max) using the
  185. ``LongAsMax=Yes`` connection string parameter::
  186. connection_url = sa.engine.URL.create(
  187. "mssql+pyodbc",
  188. username="scott",
  189. password="tiger",
  190. host="mssqlserver.example.com",
  191. database="mydb",
  192. query={
  193. "driver": "ODBC Driver 18 for SQL Server",
  194. "LongAsMax": "Yes",
  195. },
  196. )
  197. Pyodbc Pooling / connection close behavior
  198. ------------------------------------------
  199. PyODBC uses internal `pooling
  200. <https://github.com/mkleehammer/pyodbc/wiki/The-pyodbc-Module#pooling>`_ by
  201. default, which means connections will be longer lived than they are within
  202. SQLAlchemy itself. As SQLAlchemy has its own pooling behavior, it is often
  203. preferable to disable this behavior. This behavior can only be disabled
  204. globally at the PyODBC module level, **before** any connections are made::
  205. import pyodbc
  206. pyodbc.pooling = False
  207. # don't use the engine before pooling is set to False
  208. engine = create_engine("mssql+pyodbc://user:pass@dsn")
  209. If this variable is left at its default value of ``True``, **the application
  210. will continue to maintain active database connections**, even when the
  211. SQLAlchemy engine itself fully discards a connection or if the engine is
  212. disposed.
  213. .. seealso::
  214. `pooling <https://github.com/mkleehammer/pyodbc/wiki/The-pyodbc-Module#pooling>`_ -
  215. in the PyODBC documentation.
  216. Driver / Unicode Support
  217. -------------------------
  218. PyODBC works best with Microsoft ODBC drivers, particularly in the area
  219. of Unicode support on both Python 2 and Python 3.
  220. Using the FreeTDS ODBC drivers on Linux or OSX with PyODBC is **not**
  221. recommended; there have been historically many Unicode-related issues
  222. in this area, including before Microsoft offered ODBC drivers for Linux
  223. and OSX. Now that Microsoft offers drivers for all platforms, for
  224. PyODBC support these are recommended. FreeTDS remains relevant for
  225. non-ODBC drivers such as pymssql where it works very well.
  226. Rowcount Support
  227. ----------------
  228. Previous limitations with the SQLAlchemy ORM's "versioned rows" feature with
  229. Pyodbc have been resolved as of SQLAlchemy 2.0.5. See the notes at
  230. :ref:`mssql_rowcount_versioning`.
  231. .. _mssql_pyodbc_fastexecutemany:
  232. Fast Executemany Mode
  233. ---------------------
  234. The PyODBC driver includes support for a "fast executemany" mode of execution
  235. which greatly reduces round trips for a DBAPI ``executemany()`` call when using
  236. Microsoft ODBC drivers, for **limited size batches that fit in memory**. The
  237. feature is enabled by setting the attribute ``.fast_executemany`` on the DBAPI
  238. cursor when an executemany call is to be used. The SQLAlchemy PyODBC SQL
  239. Server dialect supports this parameter by passing the
  240. ``fast_executemany`` parameter to
  241. :func:`_sa.create_engine` , when using the **Microsoft ODBC driver only**::
  242. engine = create_engine(
  243. "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
  244. fast_executemany=True,
  245. )
  246. .. versionchanged:: 2.0.9 - the ``fast_executemany`` parameter now has its
  247. intended effect of this PyODBC feature taking effect for all INSERT
  248. statements that are executed with multiple parameter sets, which don't
  249. include RETURNING. Previously, SQLAlchemy 2.0's :term:`insertmanyvalues`
  250. feature would cause ``fast_executemany`` to not be used in most cases
  251. even if specified.
  252. .. versionadded:: 1.3
  253. .. seealso::
  254. `fast executemany <https://github.com/mkleehammer/pyodbc/wiki/Features-beyond-the-DB-API#fast_executemany>`_
  255. - on github
  256. .. _mssql_pyodbc_setinputsizes:
  257. Setinputsizes Support
  258. -----------------------
  259. As of version 2.0, the pyodbc ``cursor.setinputsizes()`` method is used for
  260. all statement executions, except for ``cursor.executemany()`` calls when
  261. fast_executemany=True where it is not supported (assuming
  262. :ref:`insertmanyvalues <engine_insertmanyvalues>` is kept enabled,
  263. "fastexecutemany" will not take place for INSERT statements in any case).
  264. The use of ``cursor.setinputsizes()`` can be disabled by passing
  265. ``use_setinputsizes=False`` to :func:`_sa.create_engine`.
  266. When ``use_setinputsizes`` is left at its default of ``True``, the
  267. specific per-type symbols passed to ``cursor.setinputsizes()`` can be
  268. programmatically customized using the :meth:`.DialectEvents.do_setinputsizes`
  269. hook. See that method for usage examples.
  270. .. versionchanged:: 2.0 The mssql+pyodbc dialect now defaults to using
  271. ``use_setinputsizes=True`` for all statement executions with the exception of
  272. cursor.executemany() calls when fast_executemany=True. The behavior can
  273. be turned off by passing ``use_setinputsizes=False`` to
  274. :func:`_sa.create_engine`.
  275. """ # noqa
  276. import datetime
  277. import decimal
  278. import re
  279. import struct
  280. from .base import _MSDateTime
  281. from .base import _MSUnicode
  282. from .base import _MSUnicodeText
  283. from .base import BINARY
  284. from .base import DATETIMEOFFSET
  285. from .base import MSDialect
  286. from .base import MSExecutionContext
  287. from .base import VARBINARY
  288. from .json import JSON as _MSJson
  289. from .json import JSONIndexType as _MSJsonIndexType
  290. from .json import JSONPathType as _MSJsonPathType
  291. from ... import exc
  292. from ... import types as sqltypes
  293. from ... import util
  294. from ...connectors.pyodbc import PyODBCConnector
  295. from ...engine import cursor as _cursor
  296. class _ms_numeric_pyodbc:
  297. """Turns Decimals with adjusted() < 0 or > 7 into strings.
  298. The routines here are needed for older pyodbc versions
  299. as well as current mxODBC versions.
  300. """
  301. def bind_processor(self, dialect):
  302. super_process = super().bind_processor(dialect)
  303. if not dialect._need_decimal_fix:
  304. return super_process
  305. def process(value):
  306. if self.asdecimal and isinstance(value, decimal.Decimal):
  307. adjusted = value.adjusted()
  308. if adjusted < 0:
  309. return self._small_dec_to_string(value)
  310. elif adjusted > 7:
  311. return self._large_dec_to_string(value)
  312. if super_process:
  313. return super_process(value)
  314. else:
  315. return value
  316. return process
  317. # these routines needed for older versions of pyodbc.
  318. # as of 2.1.8 this logic is integrated.
  319. def _small_dec_to_string(self, value):
  320. return "%s0.%s%s" % (
  321. (value < 0 and "-" or ""),
  322. "0" * (abs(value.adjusted()) - 1),
  323. "".join([str(nint) for nint in value.as_tuple()[1]]),
  324. )
  325. def _large_dec_to_string(self, value):
  326. _int = value.as_tuple()[1]
  327. if "E" in str(value):
  328. result = "%s%s%s" % (
  329. (value < 0 and "-" or ""),
  330. "".join([str(s) for s in _int]),
  331. "0" * (value.adjusted() - (len(_int) - 1)),
  332. )
  333. else:
  334. if (len(_int) - 1) > value.adjusted():
  335. result = "%s%s.%s" % (
  336. (value < 0 and "-" or ""),
  337. "".join([str(s) for s in _int][0 : value.adjusted() + 1]),
  338. "".join([str(s) for s in _int][value.adjusted() + 1 :]),
  339. )
  340. else:
  341. result = "%s%s" % (
  342. (value < 0 and "-" or ""),
  343. "".join([str(s) for s in _int][0 : value.adjusted() + 1]),
  344. )
  345. return result
  346. class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
  347. pass
  348. class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
  349. pass
  350. class _ms_binary_pyodbc:
  351. """Wraps binary values in dialect-specific Binary wrapper.
  352. If the value is null, return a pyodbc-specific BinaryNull
  353. object to prevent pyODBC [and FreeTDS] from defaulting binary
  354. NULL types to SQLWCHAR and causing implicit conversion errors.
  355. """
  356. def bind_processor(self, dialect):
  357. if dialect.dbapi is None:
  358. return None
  359. DBAPIBinary = dialect.dbapi.Binary
  360. def process(value):
  361. if value is not None:
  362. return DBAPIBinary(value)
  363. else:
  364. # pyodbc-specific
  365. return dialect.dbapi.BinaryNull
  366. return process
  367. class _ODBCDateTimeBindProcessor:
  368. """Add bind processors to handle datetimeoffset behaviors"""
  369. has_tz = False
  370. def bind_processor(self, dialect):
  371. def process(value):
  372. if value is None:
  373. return None
  374. elif isinstance(value, str):
  375. # if a string was passed directly, allow it through
  376. return value
  377. elif not value.tzinfo or (not self.timezone and not self.has_tz):
  378. # for DateTime(timezone=False)
  379. return value
  380. else:
  381. # for DATETIMEOFFSET or DateTime(timezone=True)
  382. #
  383. # Convert to string format required by T-SQL
  384. dto_string = value.strftime("%Y-%m-%d %H:%M:%S.%f %z")
  385. # offset needs a colon, e.g., -0700 -> -07:00
  386. # "UTC offset in the form (+-)HHMM[SS[.ffffff]]"
  387. # backend currently rejects seconds / fractional seconds
  388. dto_string = re.sub(
  389. r"([\+\-]\d{2})([\d\.]+)$", r"\1:\2", dto_string
  390. )
  391. return dto_string
  392. return process
  393. class _ODBCDateTime(_ODBCDateTimeBindProcessor, _MSDateTime):
  394. pass
  395. class _ODBCDATETIMEOFFSET(_ODBCDateTimeBindProcessor, DATETIMEOFFSET):
  396. has_tz = True
  397. class _VARBINARY_pyodbc(_ms_binary_pyodbc, VARBINARY):
  398. pass
  399. class _BINARY_pyodbc(_ms_binary_pyodbc, BINARY):
  400. pass
  401. class _String_pyodbc(sqltypes.String):
  402. def get_dbapi_type(self, dbapi):
  403. if self.length in (None, "max") or self.length >= 2000:
  404. return (dbapi.SQL_VARCHAR, 0, 0)
  405. else:
  406. return dbapi.SQL_VARCHAR
  407. class _Unicode_pyodbc(_MSUnicode):
  408. def get_dbapi_type(self, dbapi):
  409. if self.length in (None, "max") or self.length >= 2000:
  410. return (dbapi.SQL_WVARCHAR, 0, 0)
  411. else:
  412. return dbapi.SQL_WVARCHAR
  413. class _UnicodeText_pyodbc(_MSUnicodeText):
  414. def get_dbapi_type(self, dbapi):
  415. if self.length in (None, "max") or self.length >= 2000:
  416. return (dbapi.SQL_WVARCHAR, 0, 0)
  417. else:
  418. return dbapi.SQL_WVARCHAR
  419. class _JSON_pyodbc(_MSJson):
  420. def get_dbapi_type(self, dbapi):
  421. return (dbapi.SQL_WVARCHAR, 0, 0)
  422. class _JSONIndexType_pyodbc(_MSJsonIndexType):
  423. def get_dbapi_type(self, dbapi):
  424. return dbapi.SQL_WVARCHAR
  425. class _JSONPathType_pyodbc(_MSJsonPathType):
  426. def get_dbapi_type(self, dbapi):
  427. return dbapi.SQL_WVARCHAR
  428. class MSExecutionContext_pyodbc(MSExecutionContext):
  429. _embedded_scope_identity = False
  430. def pre_exec(self):
  431. """where appropriate, issue "select scope_identity()" in the same
  432. statement.
  433. Background on why "scope_identity()" is preferable to "@@identity":
  434. https://msdn.microsoft.com/en-us/library/ms190315.aspx
  435. Background on why we attempt to embed "scope_identity()" into the same
  436. statement as the INSERT:
  437. https://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
  438. """
  439. super().pre_exec()
  440. # don't embed the scope_identity select into an
  441. # "INSERT .. DEFAULT VALUES"
  442. if (
  443. self._select_lastrowid
  444. and self.dialect.use_scope_identity
  445. and len(self.parameters[0])
  446. ):
  447. self._embedded_scope_identity = True
  448. self.statement += "; select scope_identity()"
  449. def post_exec(self):
  450. if self._embedded_scope_identity:
  451. # Fetch the last inserted id from the manipulated statement
  452. # We may have to skip over a number of result sets with
  453. # no data (due to triggers, etc.)
  454. while True:
  455. try:
  456. # fetchall() ensures the cursor is consumed
  457. # without closing it (FreeTDS particularly)
  458. rows = self.cursor.fetchall()
  459. except self.dialect.dbapi.Error:
  460. # no way around this - nextset() consumes the previous set
  461. # so we need to just keep flipping
  462. self.cursor.nextset()
  463. else:
  464. if not rows:
  465. # async adapter drivers just return None here
  466. self.cursor.nextset()
  467. continue
  468. row = rows[0]
  469. break
  470. self._lastrowid = int(row[0])
  471. self.cursor_fetch_strategy = _cursor._NO_CURSOR_DML
  472. else:
  473. super().post_exec()
  474. class MSDialect_pyodbc(PyODBCConnector, MSDialect):
  475. supports_statement_cache = True
  476. # note this parameter is no longer used by the ORM or default dialect
  477. # see #9414
  478. supports_sane_rowcount_returning = False
  479. execution_ctx_cls = MSExecutionContext_pyodbc
  480. colspecs = util.update_copy(
  481. MSDialect.colspecs,
  482. {
  483. sqltypes.Numeric: _MSNumeric_pyodbc,
  484. sqltypes.Float: _MSFloat_pyodbc,
  485. BINARY: _BINARY_pyodbc,
  486. # support DateTime(timezone=True)
  487. sqltypes.DateTime: _ODBCDateTime,
  488. DATETIMEOFFSET: _ODBCDATETIMEOFFSET,
  489. # SQL Server dialect has a VARBINARY that is just to support
  490. # "deprecate_large_types" w/ VARBINARY(max), but also we must
  491. # handle the usual SQL standard VARBINARY
  492. VARBINARY: _VARBINARY_pyodbc,
  493. sqltypes.VARBINARY: _VARBINARY_pyodbc,
  494. sqltypes.LargeBinary: _VARBINARY_pyodbc,
  495. sqltypes.String: _String_pyodbc,
  496. sqltypes.Unicode: _Unicode_pyodbc,
  497. sqltypes.UnicodeText: _UnicodeText_pyodbc,
  498. sqltypes.JSON: _JSON_pyodbc,
  499. sqltypes.JSON.JSONIndexType: _JSONIndexType_pyodbc,
  500. sqltypes.JSON.JSONPathType: _JSONPathType_pyodbc,
  501. # this excludes Enum from the string/VARCHAR thing for now
  502. # it looks like Enum's adaptation doesn't really support the
  503. # String type itself having a dialect-level impl
  504. sqltypes.Enum: sqltypes.Enum,
  505. },
  506. )
  507. def __init__(
  508. self,
  509. fast_executemany=False,
  510. use_setinputsizes=True,
  511. **params,
  512. ):
  513. super().__init__(use_setinputsizes=use_setinputsizes, **params)
  514. self.use_scope_identity = (
  515. self.use_scope_identity
  516. and self.dbapi
  517. and hasattr(self.dbapi.Cursor, "nextset")
  518. )
  519. self._need_decimal_fix = self.dbapi and self._dbapi_version() < (
  520. 2,
  521. 1,
  522. 8,
  523. )
  524. self.fast_executemany = fast_executemany
  525. if fast_executemany:
  526. self.use_insertmanyvalues_wo_returning = False
  527. def _get_server_version_info(self, connection):
  528. try:
  529. # "Version of the instance of SQL Server, in the form
  530. # of 'major.minor.build.revision'"
  531. raw = connection.exec_driver_sql(
  532. "SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR)"
  533. ).scalar()
  534. except exc.DBAPIError:
  535. # SQL Server docs indicate this function isn't present prior to
  536. # 2008. Before we had the VARCHAR cast above, pyodbc would also
  537. # fail on this query.
  538. return super()._get_server_version_info(connection)
  539. else:
  540. version = []
  541. r = re.compile(r"[.\-]")
  542. for n in r.split(raw):
  543. try:
  544. version.append(int(n))
  545. except ValueError:
  546. pass
  547. return tuple(version)
  548. def on_connect(self):
  549. super_ = super().on_connect()
  550. def on_connect(conn):
  551. if super_ is not None:
  552. super_(conn)
  553. self._setup_timestampoffset_type(conn)
  554. return on_connect
  555. def _setup_timestampoffset_type(self, connection):
  556. # output converter function for datetimeoffset
  557. def _handle_datetimeoffset(dto_value):
  558. tup = struct.unpack("<6hI2h", dto_value)
  559. return datetime.datetime(
  560. tup[0],
  561. tup[1],
  562. tup[2],
  563. tup[3],
  564. tup[4],
  565. tup[5],
  566. tup[6] // 1000,
  567. datetime.timezone(
  568. datetime.timedelta(hours=tup[7], minutes=tup[8])
  569. ),
  570. )
  571. odbc_SQL_SS_TIMESTAMPOFFSET = -155 # as defined in SQLNCLI.h
  572. connection.add_output_converter(
  573. odbc_SQL_SS_TIMESTAMPOFFSET, _handle_datetimeoffset
  574. )
  575. def do_executemany(self, cursor, statement, parameters, context=None):
  576. if self.fast_executemany:
  577. cursor.fast_executemany = True
  578. super().do_executemany(cursor, statement, parameters, context=context)
  579. def is_disconnect(self, e, connection, cursor):
  580. if isinstance(e, self.dbapi.Error):
  581. code = e.args[0]
  582. if code in {
  583. "08S01",
  584. "01000",
  585. "01002",
  586. "08003",
  587. "08007",
  588. "08S02",
  589. "08001",
  590. "HYT00",
  591. "HY010",
  592. "10054",
  593. }:
  594. return True
  595. return super().is_disconnect(e, connection, cursor)
  596. dialect = MSDialect_pyodbc