  1. # dialects/oracle/base.py
  2. # Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
  3. # <see AUTHORS file>
  4. #
  5. # This module is part of SQLAlchemy and is released under
  6. # the MIT License: https://www.opensource.org/licenses/mit-license.php
  7. # mypy: ignore-errors
  8. r"""
  9. .. dialect:: oracle
  10. :name: Oracle Database
  11. :normal_support: 11+
  12. :best_effort: 9+
  13. Auto Increment Behavior
  14. -----------------------
  15. SQLAlchemy Table objects which include integer primary keys are usually assumed
  16. to have "autoincrementing" behavior, meaning they can generate their own
  17. primary key values upon INSERT. For use within Oracle Database, two options are
  18. available, which are the use of IDENTITY columns (Oracle Database 12 and above
  19. only) or the association of a SEQUENCE with the column.
  20. Specifying GENERATED AS IDENTITY (Oracle Database 12 and above)
  21. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  22. Starting from version 12, Oracle Database can make use of identity columns
  23. using the :class:`_sql.Identity` to specify the autoincrementing behavior::
  24. t = Table(
  25. "mytable",
  26. metadata,
  27. Column("id", Integer, Identity(start=3), primary_key=True),
  28. Column(...),
  29. ...,
  30. )
  31. The CREATE TABLE for the above :class:`_schema.Table` object would be:
  32. .. sourcecode:: sql
  33. CREATE TABLE mytable (
  34. id INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 3),
  35. ...,
  36. PRIMARY KEY (id)
  37. )
38. The :class:`_schema.Identity` object supports many options to control the
  39. "autoincrementing" behavior of the column, like the starting value, the
  40. incrementing value, etc. In addition to the standard options, Oracle Database
  41. supports setting :paramref:`_schema.Identity.always` to ``None`` to use the
  42. default generated mode, rendering GENERATED AS IDENTITY in the DDL. It also supports
  43. setting :paramref:`_schema.Identity.on_null` to ``True`` to specify ON NULL
  44. in conjunction with a 'BY DEFAULT' identity column.
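As a brief sketch combining these two Oracle-specific options (the start and
increment values here are illustrative only), a BY DEFAULT ON NULL identity
column might be declared as::

    from sqlalchemy import Column, Identity, Integer, MetaData, Table

    metadata = MetaData()

    t = Table(
        "mytable",
        metadata,
        Column(
            "id",
            Integer,
            # renders GENERATED BY DEFAULT ON NULL AS IDENTITY (...)
            Identity(always=False, on_null=True, start=1, increment=1),
            primary_key=True,
        ),
    )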
  45. Using a SEQUENCE (all Oracle Database versions)
  46. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
47. Older versions of Oracle Database had no "autoincrement" feature: SQLAlchemy
  48. relies upon sequences to produce these values. With the older Oracle Database
  49. versions, *a sequence must always be explicitly specified to enable
50. autoincrement*. This diverges from the majority of documentation examples
  51. which assume the usage of an autoincrement-capable database. To specify
  52. sequences, use the sqlalchemy.schema.Sequence object which is passed to a
  53. Column construct::
  54. t = Table(
  55. "mytable",
  56. metadata,
  57. Column("id", Integer, Sequence("id_seq", start=1), primary_key=True),
  58. Column(...),
  59. ...,
  60. )
  61. This step is also required when using table reflection, i.e. autoload_with=engine::
  62. t = Table(
  63. "mytable",
  64. metadata,
  65. Column("id", Integer, Sequence("id_seq", start=1), primary_key=True),
  66. autoload_with=engine,
  67. )
  68. .. versionchanged:: 1.4 Added :class:`_schema.Identity` construct
  69. in a :class:`_schema.Column` to specify the option of an autoincrementing
  70. column.
  71. .. _oracle_isolation_level:
  72. Transaction Isolation Level / Autocommit
  73. ----------------------------------------
  74. Oracle Database supports "READ COMMITTED" and "SERIALIZABLE" modes of
  75. isolation. The AUTOCOMMIT isolation level is also supported by the
  76. python-oracledb and cx_Oracle dialects.
  77. To set using per-connection execution options::
  78. connection = engine.connect()
  79. connection = connection.execution_options(isolation_level="AUTOCOMMIT")
80. For ``READ COMMITTED`` and ``SERIALIZABLE``, the Oracle Database dialects set
  81. the level at the session level using ``ALTER SESSION``, which is reverted back
  82. to its default setting when the connection is returned to the connection pool.
  83. Valid values for ``isolation_level`` include:
  84. * ``READ COMMITTED``
  85. * ``AUTOCOMMIT``
  86. * ``SERIALIZABLE``
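As a further sketch, the level may also be set up front for all connections of
an engine (the connection URL below is hypothetical)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1",
        isolation_level="SERIALIZABLE",
    )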
  87. .. note:: The implementation for the
  88. :meth:`_engine.Connection.get_isolation_level` method as implemented by the
89. Oracle Database dialects necessarily forces the start of a transaction using the
  90. Oracle Database DBMS_TRANSACTION.LOCAL_TRANSACTION_ID function; otherwise no
  91. level is normally readable.
  92. Additionally, the :meth:`_engine.Connection.get_isolation_level` method will
  93. raise an exception if the ``v$transaction`` view is not available due to
  94. permissions or other reasons, which is a common occurrence in Oracle Database
  95. installations.
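A sketch of reading the level on a live connection, subject to the caveats
above (this may begin a transaction, or raise if ``v$transaction`` is not
readable)::

    with engine.connect() as connection:
        print(connection.get_isolation_level())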
  96. The python-oracledb and cx_Oracle dialects attempt to call the
  97. :meth:`_engine.Connection.get_isolation_level` method when the dialect makes
  98. its first connection to the database in order to acquire the
  99. "default"isolation level. This default level is necessary so that the level
  100. can be reset on a connection after it has been temporarily modified using
  101. :meth:`_engine.Connection.execution_options` method. In the common event
  102. that the :meth:`_engine.Connection.get_isolation_level` method raises an
  103. exception due to ``v$transaction`` not being readable as well as any other
  104. database-related failure, the level is assumed to be "READ COMMITTED". No
  105. warning is emitted for this initial first-connect condition as it is
  106. expected to be a common restriction on Oracle databases.
  107. .. versionadded:: 1.3.16 added support for AUTOCOMMIT to the cx_Oracle dialect
  108. as well as the notion of a default isolation level
  109. .. versionadded:: 1.3.21 Added support for SERIALIZABLE as well as live
  110. reading of the isolation level.
  111. .. versionchanged:: 1.3.22 In the event that the default isolation
  112. level cannot be read due to permissions on the v$transaction view as
  113. is common in Oracle installations, the default isolation level is hardcoded
  114. to "READ COMMITTED" which was the behavior prior to 1.3.21.
  115. .. seealso::
  116. :ref:`dbapi_autocommit`
  117. Identifier Casing
  118. -----------------
  119. In Oracle Database, the data dictionary represents all case insensitive
  120. identifier names using UPPERCASE text. This is in contradiction to the
  121. expectations of SQLAlchemy, which assume a case insensitive name is represented
  122. as lowercase text.
  123. As an example of case insensitive identifier names, consider the following table:
  124. .. sourcecode:: sql
  125. CREATE TABLE MyTable (Identifier INTEGER PRIMARY KEY)
  126. If you were to ask Oracle Database for information about this table, the
  127. table name would be reported as ``MYTABLE`` and the column name would
  128. be reported as ``IDENTIFIER``. Compare to most other databases such as
  129. PostgreSQL and MySQL which would report these names as ``mytable`` and
  130. ``identifier``. The names are **not quoted, therefore are case insensitive**.
  131. The special casing of ``MyTable`` and ``Identifier`` would only be maintained
  132. if they were quoted in the table definition:
  133. .. sourcecode:: sql
  134. CREATE TABLE "MyTable" ("Identifier" INTEGER PRIMARY KEY)
  135. When constructing a SQLAlchemy :class:`.Table` object, **an all lowercase name
  136. is considered to be case insensitive**. So the following table assumes
  137. case insensitive names::
  138. Table("mytable", metadata, Column("identifier", Integer, primary_key=True))
  139. Whereas when mixed case or UPPERCASE names are used, case sensitivity is
  140. assumed::
  141. Table("MyTable", metadata, Column("Identifier", Integer, primary_key=True))
  142. A similar situation occurs at the database driver level when emitting a
  143. textual SQL SELECT statement and looking at column names in the DBAPI
  144. ``cursor.description`` attribute. A database like PostgreSQL will normalize
  145. case insensitive names to be lowercase::
  146. >>> pg_engine = create_engine("postgresql://scott:tiger@localhost/test")
  147. >>> pg_connection = pg_engine.connect()
  148. >>> result = pg_connection.exec_driver_sql("SELECT 1 AS SomeName")
  149. >>> result.cursor.description
  150. (Column(name='somename', type_code=23),)
  151. Whereas Oracle normalizes them to UPPERCASE::
  152. >>> oracle_engine = create_engine("oracle+oracledb://scott:tiger@oracle18c/xe")
  153. >>> oracle_connection = oracle_engine.connect()
  154. >>> result = oracle_connection.exec_driver_sql(
  155. ... "SELECT 1 AS SomeName FROM DUAL"
  156. ... )
  157. >>> result.cursor.description
  158. [('SOMENAME', <DbType DB_TYPE_NUMBER>, 127, None, 0, -127, True)]
  159. In order to achieve cross-database parity for the two cases of a. table
  160. reflection and b. textual-only SQL statement round trips, SQLAlchemy performs a step
  161. called **name normalization** when using the Oracle dialect. This process may
  162. also apply to other third party dialects that have similar UPPERCASE handling
  163. of case insensitive names.
  164. When using name normalization, SQLAlchemy attempts to detect if a name is
  165. case insensitive by checking if all characters are UPPERCASE letters only;
  166. if so, then it assumes this is a case insensitive name and is delivered as
  167. a lowercase name.
  168. For table reflection, a tablename that is seen represented as all UPPERCASE
  169. in Oracle Database's catalog tables will be assumed to have a case insensitive
  170. name. This is what allows the ``Table`` definition to use lower case names
  171. and be equally compatible from a reflection point of view on Oracle Database
  172. and all other databases such as PostgreSQL and MySQL::
  173. # matches a table created with CREATE TABLE mytable
  174. Table("mytable", metadata, autoload_with=some_engine)
  175. Above, the all lowercase name ``"mytable"`` is case insensitive; it will match
  176. a table reported by PostgreSQL as ``"mytable"`` and a table reported by
  177. Oracle as ``"MYTABLE"``. If name normalization were not present, it would
  178. not be possible for the above :class:`.Table` definition to be introspectable
  179. in a cross-database way, since we are dealing with a case insensitive name
  180. that is not reported by each database in the same way.
  181. Case sensitivity can be forced on in this case, such as if we wanted to represent
  182. the quoted tablename ``"MYTABLE"`` with that exact casing, most simply by using
  183. that casing directly, which will be seen as a case sensitive name::
  184. # matches a table created with CREATE TABLE "MYTABLE"
  185. Table("MYTABLE", metadata, autoload_with=some_engine)
  186. For the unusual case of a quoted all-lowercase name, the :class:`.quoted_name`
  187. construct may be used::
  188. from sqlalchemy import quoted_name
  189. # matches a table created with CREATE TABLE "mytable"
  190. Table(
  191. quoted_name("mytable", quote=True), metadata, autoload_with=some_engine
  192. )
  193. Name normalization also takes place when handling result sets from **purely
  194. textual SQL strings**, that have no other :class:`.Table` or :class:`.Column`
  195. metadata associated with them. This includes SQL strings executed using
  196. :meth:`.Connection.exec_driver_sql` and SQL strings executed using the
  197. :func:`.text` construct which do not include :class:`.Column` metadata.
  198. Returning to the Oracle Database SELECT statement, we see that even though
  199. ``cursor.description`` reports the column name as ``SOMENAME``, SQLAlchemy
  200. name normalizes this to ``somename``::
  201. >>> oracle_engine = create_engine("oracle+oracledb://scott:tiger@oracle18c/xe")
  202. >>> oracle_connection = oracle_engine.connect()
  203. >>> result = oracle_connection.exec_driver_sql(
  204. ... "SELECT 1 AS SomeName FROM DUAL"
  205. ... )
  206. >>> result.cursor.description
  207. [('SOMENAME', <DbType DB_TYPE_NUMBER>, 127, None, 0, -127, True)]
  208. >>> result.keys()
  209. RMKeyView(['somename'])
  210. The single scenario where the above behavior produces inaccurate results
  211. is when using an all-uppercase, quoted name. SQLAlchemy has no way to determine
  212. that a particular name in ``cursor.description`` was quoted, and is therefore
  213. case sensitive, or was not quoted, and should be name normalized::
  214. >>> result = oracle_connection.exec_driver_sql(
  215. ... 'SELECT 1 AS "SOMENAME" FROM DUAL'
  216. ... )
  217. >>> result.cursor.description
  218. [('SOMENAME', <DbType DB_TYPE_NUMBER>, 127, None, 0, -127, True)]
  219. >>> result.keys()
  220. RMKeyView(['somename'])
  221. For this case, a new feature will be available in SQLAlchemy 2.1 to disable
  222. the name normalization behavior in specific cases.
  223. .. _oracle_max_identifier_lengths:
  224. Maximum Identifier Lengths
  225. --------------------------
  226. SQLAlchemy is sensitive to the maximum identifier length supported by Oracle
  227. Database. This affects generated SQL label names as well as the generation of
  228. constraint names, particularly in the case where the constraint naming
  229. convention feature described at :ref:`constraint_naming_conventions` is being
  230. used.
  231. Oracle Database 12.2 increased the default maximum identifier length from 30 to
  232. 128. As of SQLAlchemy 1.4, the default maximum identifier length for the Oracle
  233. dialects is 128 characters. Upon first connection, the maximum length actually
  234. supported by the database is obtained. In all cases, setting the
  235. :paramref:`_sa.create_engine.max_identifier_length` parameter will bypass this
  236. change and the value given will be used as is::
  237. engine = create_engine(
  238. "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1",
  239. max_identifier_length=30,
  240. )
  241. If :paramref:`_sa.create_engine.max_identifier_length` is not set, the oracledb
  242. dialect internally uses the ``max_identifier_length`` attribute available on
  243. driver connections since python-oracledb version 2.5. When using an older
  244. driver version, or using the cx_Oracle dialect, SQLAlchemy will instead attempt
  245. to use the query ``SELECT value FROM v$parameter WHERE name = 'compatible'``
  246. upon first connect in order to determine the effective compatibility version of
  247. the database. The "compatibility" version is a version number that is
  248. independent of the actual database version. It is used to assist database
  249. migration. It is configured by an Oracle Database initialization parameter. The
  250. compatibility version then determines the maximum allowed identifier length for
  251. the database. If the V$ view is not available, the database version information
  252. is used instead.
  253. The maximum identifier length comes into play both when generating anonymized
  254. SQL labels in SELECT statements, but more crucially when generating constraint
  255. names from a naming convention. It is this area that has created the need for
  256. SQLAlchemy to change this default conservatively. For example, the following
  257. naming convention produces two very different constraint names based on the
  258. identifier length::
  259. from sqlalchemy import Column
  260. from sqlalchemy import Index
  261. from sqlalchemy import Integer
  262. from sqlalchemy import MetaData
  263. from sqlalchemy import Table
  264. from sqlalchemy.dialects import oracle
  265. from sqlalchemy.schema import CreateIndex
  266. m = MetaData(naming_convention={"ix": "ix_%(column_0N_name)s"})
  267. t = Table(
  268. "t",
  269. m,
  270. Column("some_column_name_1", Integer),
  271. Column("some_column_name_2", Integer),
  272. Column("some_column_name_3", Integer),
  273. )
  274. ix = Index(
  275. None,
  276. t.c.some_column_name_1,
  277. t.c.some_column_name_2,
  278. t.c.some_column_name_3,
  279. )
  280. oracle_dialect = oracle.dialect(max_identifier_length=30)
  281. print(CreateIndex(ix).compile(dialect=oracle_dialect))
  282. With an identifier length of 30, the above CREATE INDEX looks like:
  283. .. sourcecode:: sql
  284. CREATE INDEX ix_some_column_name_1s_70cd ON t
  285. (some_column_name_1, some_column_name_2, some_column_name_3)
286. However with a length of 128, it becomes:
  287. .. sourcecode:: sql
  288. CREATE INDEX ix_some_column_name_1some_column_name_2some_column_name_3 ON t
  289. (some_column_name_1, some_column_name_2, some_column_name_3)
  290. Applications which have run versions of SQLAlchemy prior to 1.4 on Oracle
  291. Database version 12.2 or greater are therefore subject to the scenario of a
  292. database migration that wishes to "DROP CONSTRAINT" on a name that was
  293. previously generated with the shorter length. This migration will fail when
  294. the identifier length is changed without the name of the index or constraint
  295. first being adjusted. Such applications are strongly advised to make use of
  296. :paramref:`_sa.create_engine.max_identifier_length` in order to maintain
  297. control of the generation of truncated names, and to fully review and test all
  298. database migrations in a staging environment when changing this value to ensure
  299. that the impact of this change has been mitigated.
  300. .. versionchanged:: 1.4 the default max_identifier_length for Oracle Database
  301. is 128 characters, which is adjusted down to 30 upon first connect if the
302. Oracle Database, or its compatibility setting, is lower than version 12.2.
  303. LIMIT/OFFSET/FETCH Support
  304. --------------------------
  305. Methods like :meth:`_sql.Select.limit` and :meth:`_sql.Select.offset` make use
  306. of ``FETCH FIRST N ROW / OFFSET N ROWS`` syntax assuming Oracle Database 12c or
  307. above, and assuming the SELECT statement is not embedded within a compound
  308. statement like UNION. This syntax is also available directly by using the
  309. :meth:`_sql.Select.fetch` method.
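The following sketch, using a hypothetical table, illustrates how such a
statement typically compiles against the Oracle dialect::

    from sqlalchemy import column, select, table
    from sqlalchemy.dialects import oracle

    t = table("t", column("x"))
    stmt = select(t).order_by(t.c.x).limit(10).offset(5)

    # typically renders OFFSET ... ROWS FETCH FIRST ... ROWS ONLY
    # when targeting Oracle Database 12c or above
    print(stmt.compile(dialect=oracle.dialect()))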
  310. .. versionchanged:: 2.0 the Oracle Database dialects now use ``FETCH FIRST N
  311. ROW / OFFSET N ROWS`` for all :meth:`_sql.Select.limit` and
  312. :meth:`_sql.Select.offset` usage including within the ORM and legacy
  313. :class:`_orm.Query`. To force the legacy behavior using window functions,
  314. specify the ``enable_offset_fetch=False`` dialect parameter to
  315. :func:`_sa.create_engine`.
  316. The use of ``FETCH FIRST / OFFSET`` may be disabled on any Oracle Database
  317. version by passing ``enable_offset_fetch=False`` to :func:`_sa.create_engine`,
  318. which will force the use of "legacy" mode that makes use of window functions.
  319. This mode is also selected automatically when using a version of Oracle
  320. Database prior to 12c.
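A sketch of disabling the newer syntax explicitly (the connection URL is
hypothetical)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1",
        enable_offset_fetch=False,
    )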
  321. When using legacy mode, or when a :class:`.Select` statement with limit/offset
  322. is embedded in a compound statement, an emulated approach for LIMIT / OFFSET
  323. based on window functions is used, which involves creation of a subquery using
  324. ``ROW_NUMBER`` that is prone to performance issues as well as SQL construction
  325. issues for complex statements. However, this approach is supported by all
  326. Oracle Database versions. See notes below.
  327. Notes on LIMIT / OFFSET emulation (when fetch() method cannot be used)
  328. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  329. If using :meth:`_sql.Select.limit` and :meth:`_sql.Select.offset`, or with the
  330. ORM the :meth:`_orm.Query.limit` and :meth:`_orm.Query.offset` methods on an
  331. Oracle Database version prior to 12c, the following notes apply:
  332. * SQLAlchemy currently makes use of ROWNUM to achieve
  333. LIMIT/OFFSET; the exact methodology is taken from
  334. https://blogs.oracle.com/oraclemagazine/on-rownum-and-limiting-results .
  335. * the "FIRST_ROWS()" optimization keyword is not used by default. To enable
  336. the usage of this optimization directive, specify ``optimize_limits=True``
  337. to :func:`_sa.create_engine`.
  338. .. versionchanged:: 1.4
  339. The Oracle Database dialect renders limit/offset integer values using a
  340. "post compile" scheme which renders the integer directly before passing
  341. the statement to the cursor for execution. The ``use_binds_for_limits``
  342. flag no longer has an effect.
  343. .. seealso::
  344. :ref:`change_4808`.
  345. .. _oracle_returning:
  346. RETURNING Support
  347. -----------------
  348. Oracle Database supports RETURNING fully for INSERT, UPDATE and DELETE
  349. statements that are invoked with a single collection of bound parameters (that
  350. is, a ``cursor.execute()`` style statement; SQLAlchemy does not generally
  351. support RETURNING with :term:`executemany` statements). Multiple rows may be
  352. returned as well.
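As a sketch, assuming a :class:`_schema.Table` named ``my_table`` with ``id``
and ``data`` columns and an existing ``engine``, a single-row INSERT with
RETURNING might look like::

    from sqlalchemy import insert

    with engine.begin() as conn:
        result = conn.execute(
            insert(my_table).returning(my_table.c.id),
            {"data": "some value"},
        )
        new_id = result.scalar_one()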
  353. .. versionchanged:: 2.0 the Oracle Database backend has full support for
  354. RETURNING on parity with other backends.
  355. ON UPDATE CASCADE
  356. -----------------
  357. Oracle Database doesn't have native ON UPDATE CASCADE functionality. A trigger
  358. based solution is available at
  359. https://web.archive.org/web/20090317041251/https://asktom.oracle.com/tkyte/update_cascade/index.html
  360. When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
  361. cascading updates - specify ForeignKey objects using the
  362. "deferrable=True, initially='deferred'" keyword arguments,
  363. and specify "passive_updates=False" on each relationship().
  364. Oracle Database 8 Compatibility
  365. -------------------------------
  366. .. warning:: The status of Oracle Database 8 compatibility is not known for
  367. SQLAlchemy 2.0.
  368. When Oracle Database 8 is detected, the dialect internally configures itself to
  369. the following behaviors:
  370. * the use_ansi flag is set to False. This has the effect of converting all
  371. JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
  372. makes use of Oracle's (+) operator.
  373. * the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
  374. the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
375. instead. This is because these types don't seem to work correctly on Oracle 8
  376. even though they are available. The :class:`~sqlalchemy.types.NVARCHAR` and
  377. :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate
  378. NVARCHAR2 and NCLOB.
  379. Synonym/DBLINK Reflection
  380. -------------------------
  381. When using reflection with Table objects, the dialect can optionally search
  382. for tables indicated by synonyms, either in local or remote schemas or
  383. accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as
  384. a keyword argument to the :class:`_schema.Table` construct::
385. some_table = Table(
386. "some_table", metadata, autoload_with=some_engine, oracle_resolve_synonyms=True
387. )
  388. When this flag is set, the given name (such as ``some_table`` above) will be
  389. searched not just in the ``ALL_TABLES`` view, but also within the
  390. ``ALL_SYNONYMS`` view to see if this name is actually a synonym to another
  391. name. If the synonym is located and refers to a DBLINK, the Oracle Database
392. dialects know how to locate the table's information using DBLINK syntax (e.g.
  393. ``@dblink``).
  394. ``oracle_resolve_synonyms`` is accepted wherever reflection arguments are
  395. accepted, including methods such as :meth:`_schema.MetaData.reflect` and
  396. :meth:`_reflection.Inspector.get_columns`.
  397. If synonyms are not in use, this flag should be left disabled.
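For example, a sketch using these other reflection entrypoints (the engine and
table name are hypothetical)::

    from sqlalchemy import MetaData, inspect

    metadata = MetaData()
    metadata.reflect(some_engine, oracle_resolve_synonyms=True)

    columns = inspect(some_engine).get_columns(
        "some_table", oracle_resolve_synonyms=True
    )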
  398. .. _oracle_constraint_reflection:
  399. Constraint Reflection
  400. ---------------------
  401. The Oracle Database dialects can return information about foreign key, unique,
  402. and CHECK constraints, as well as indexes on tables.
  403. Raw information regarding these constraints can be acquired using
  404. :meth:`_reflection.Inspector.get_foreign_keys`,
  405. :meth:`_reflection.Inspector.get_unique_constraints`,
  406. :meth:`_reflection.Inspector.get_check_constraints`, and
  407. :meth:`_reflection.Inspector.get_indexes`.
  408. .. versionchanged:: 1.2 The Oracle Database dialect can now reflect UNIQUE and
  409. CHECK constraints.
  410. When using reflection at the :class:`_schema.Table` level, the
  411. :class:`_schema.Table`
  412. will also include these constraints.
  413. Note the following caveats:
  414. * When using the :meth:`_reflection.Inspector.get_check_constraints` method,
  415. Oracle Database builds a special "IS NOT NULL" constraint for columns that
  416. specify "NOT NULL". This constraint is **not** returned by default; to
  417. include the "IS NOT NULL" constraints, pass the flag ``include_all=True``::
  418. from sqlalchemy import create_engine, inspect
  419. engine = create_engine(
  420. "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
  421. )
  422. inspector = inspect(engine)
  423. all_check_constraints = inspector.get_check_constraints(
  424. "some_table", include_all=True
  425. )
  426. * in most cases, when reflecting a :class:`_schema.Table`, a UNIQUE constraint
  427. will **not** be available as a :class:`.UniqueConstraint` object, as Oracle
  428. Database mirrors unique constraints with a UNIQUE index in most cases (the
  429. exception seems to be when two or more unique constraints represent the same
  430. columns); the :class:`_schema.Table` will instead represent these using
  431. :class:`.Index` with the ``unique=True`` flag set.
  432. * Oracle Database creates an implicit index for the primary key of a table;
  433. this index is **excluded** from all index results.
  434. * the list of columns reflected for an index will not include column names
  435. that start with SYS_NC.
  436. Table names with SYSTEM/SYSAUX tablespaces
  437. -------------------------------------------
  438. The :meth:`_reflection.Inspector.get_table_names` and
  439. :meth:`_reflection.Inspector.get_temp_table_names`
  440. methods each return a list of table names for the current engine. These methods
  441. are also part of the reflection which occurs within an operation such as
  442. :meth:`_schema.MetaData.reflect`. By default,
  443. these operations exclude the ``SYSTEM``
  444. and ``SYSAUX`` tablespaces from the operation. In order to change this, the
  445. default list of tablespaces excluded can be changed at the engine level using
  446. the ``exclude_tablespaces`` parameter::
  447. # exclude SYSAUX and SOME_TABLESPACE, but not SYSTEM
  448. e = create_engine(
  449. "oracle+oracledb://scott:tiger@localhost:1521/?service_name=freepdb1",
  450. exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"],
  451. )
  452. .. _oracle_float_support:
  453. FLOAT / DOUBLE Support and Behaviors
  454. ------------------------------------
  455. The SQLAlchemy :class:`.Float` and :class:`.Double` datatypes are generic
  456. datatypes that resolve to the "least surprising" datatype for a given backend.
  457. For Oracle Database, this means they resolve to the ``FLOAT`` and ``DOUBLE``
  458. types::
  459. >>> from sqlalchemy import cast, literal, Float
  460. >>> from sqlalchemy.dialects import oracle
  461. >>> float_datatype = Float()
  462. >>> print(cast(literal(5.0), float_datatype).compile(dialect=oracle.dialect()))
  463. CAST(:param_1 AS FLOAT)
  464. Oracle's ``FLOAT`` / ``DOUBLE`` datatypes are aliases for ``NUMBER``. Oracle
  465. Database stores ``NUMBER`` values with full precision, not floating point
  466. precision, which means that ``FLOAT`` / ``DOUBLE`` do not actually behave like
  467. native FP values. Oracle Database instead offers special datatypes
468. ``BINARY_FLOAT`` and ``BINARY_DOUBLE`` to deliver real 4- and 8-byte FP
  469. values.
  470. SQLAlchemy supports these datatypes directly using :class:`.BINARY_FLOAT` and
  471. :class:`.BINARY_DOUBLE`. To use the :class:`.Float` or :class:`.Double`
  472. datatypes in a database agnostic way, while allowing Oracle backends to utilize
  473. one of these types, use the :meth:`.TypeEngine.with_variant` method to set up a
  474. variant::
  475. >>> from sqlalchemy import cast, literal, Float
  476. >>> from sqlalchemy.dialects import oracle
  477. >>> float_datatype = Float().with_variant(oracle.BINARY_FLOAT(), "oracle")
  478. >>> print(cast(literal(5.0), float_datatype).compile(dialect=oracle.dialect()))
  479. CAST(:param_1 AS BINARY_FLOAT)
  480. E.g. to use this datatype in a :class:`.Table` definition::
  481. my_table = Table(
  482. "my_table",
  483. metadata,
  484. Column(
  485. "fp_data", Float().with_variant(oracle.BINARY_FLOAT(), "oracle")
  486. ),
  487. )
  488. DateTime Compatibility
  489. ----------------------
490. Oracle Database has no datatype known as ``DATETIME``; it instead has only
  491. ``DATE``, which can actually store a date and time value. For this reason, the
  492. Oracle Database dialects provide a type :class:`_oracle.DATE` which is a
  493. subclass of :class:`.DateTime`. This type has no special behavior, and is only
  494. present as a "marker" for this type; additionally, when a database column is
  495. reflected and the type is reported as ``DATE``, the time-supporting
  496. :class:`_oracle.DATE` type is used.
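A brief sketch of this relationship::

    from sqlalchemy import DateTime
    from sqlalchemy.dialects import oracle

    # the Oracle-specific DATE is a DateTime subclass, so a reflected DATE
    # column continues to accept and return Python datetime values
    assert issubclass(oracle.DATE, DateTime)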
  497. .. _oracle_table_options:
  498. Oracle Database Table Options
  499. -----------------------------
  500. The CREATE TABLE phrase supports the following options with Oracle Database
  501. dialects in conjunction with the :class:`_schema.Table` construct:
  502. * ``ON COMMIT``::
  503. Table(
  504. "some_table",
  505. metadata,
  506. ...,
  507. prefixes=["GLOBAL TEMPORARY"],
  508. oracle_on_commit="PRESERVE ROWS",
  509. )
510. * ``COMPRESS``::
  512. Table(
  513. "mytable", metadata, Column("data", String(32)), oracle_compress=True
  514. )
  515. Table("mytable", metadata, Column("data", String(32)), oracle_compress=6)
  516. The ``oracle_compress`` parameter accepts either an integer compression
  517. level, or ``True`` to use the default compression level.
518. * ``TABLESPACE``::
  520. Table("mytable", metadata, ..., oracle_tablespace="EXAMPLE_TABLESPACE")
  521. The ``oracle_tablespace`` parameter specifies the tablespace in which the
  522. table is to be created. This is useful when you want to create a table in a
  523. tablespace other than the default tablespace of the user.
  524. .. versionadded:: 2.0.37
  525. .. _oracle_index_options:
  526. Oracle Database Specific Index Options
  527. --------------------------------------
  528. Bitmap Indexes
  529. ~~~~~~~~~~~~~~
  530. You can specify the ``oracle_bitmap`` parameter to create a bitmap index
  531. instead of a B-tree index::
  532. Index("my_index", my_table.c.data, oracle_bitmap=True)
  533. Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not
  534. check for such limitations, only the database will.
  535. Index compression
  536. ~~~~~~~~~~~~~~~~~
  537. Oracle Database has a more efficient storage mode for indexes containing lots
  538. of repeated values. Use the ``oracle_compress`` parameter to turn on key
  539. compression::
  540. Index("my_index", my_table.c.data, oracle_compress=True)
  541. Index(
  542. "my_index",
  543. my_table.c.data1,
  544. my_table.c.data2,
  545. unique=True,
  546. oracle_compress=1,
  547. )
  548. The ``oracle_compress`` parameter accepts either an integer specifying the
  549. number of prefix columns to compress, or ``True`` to use the default (all
  550. columns for non-unique indexes, all but the last column for unique indexes).
  551. .. _oracle_vector_datatype:
  552. VECTOR Datatype
  553. ---------------
  554. Oracle Database 23ai introduced a new VECTOR datatype for artificial intelligence
  555. and machine learning search operations. The VECTOR datatype is a homogeneous array
  556. of 8-bit signed integers, 8-bit unsigned integers (binary), 32-bit floating-point
  557. numbers, or 64-bit floating-point numbers.
  558. A vector's storage type can be either DENSE or SPARSE. A dense vector contains
  559. meaningful values in most or all of its dimensions. In contrast, a sparse vector
  560. has non-zero values in only a few dimensions, with the majority being zero.
  561. Sparse vectors are represented by the total number of vector dimensions, an array
  562. of indices, and an array of values where each value’s location in the vector is
  563. indicated by the corresponding indices array position. All other vector values are
  564. treated as zero.
  565. The storage formats that can be used with sparse vectors are float32, float64, and
  566. int8. Note that the binary storage format cannot be used with sparse vectors.
  567. Sparse vectors are supported when you are using Oracle Database 23.7 or later.
  568. .. seealso::
  569. `Using VECTOR Data
  570. <https://python-oracledb.readthedocs.io/en/latest/user_guide/vector_data_type.html>`_ - in the documentation
  571. for the :ref:`oracledb` driver.
  572. .. versionadded:: 2.0.41 - Added VECTOR datatype
  573. .. versionadded:: 2.0.43 - Added DENSE/SPARSE support
  574. CREATE TABLE support for VECTOR
  575. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  576. With the :class:`.VECTOR` datatype, you can specify the number of dimensions,
  577. the storage format, and the storage type for the data. Valid values for the
  578. storage format are enum members of :class:`.VectorStorageFormat`. Valid values
  579. for the storage type are enum members of :class:`.VectorStorageType`. If
  580. storage type is not specified, a DENSE vector is created by default.
  581. To create a table that includes a :class:`.VECTOR` column::
  582. from sqlalchemy.dialects.oracle import (
  583. VECTOR,
  584. VectorStorageFormat,
  585. VectorStorageType,
  586. )
  587. t = Table(
  588. "t1",
  589. metadata,
  590. Column("id", Integer, primary_key=True),
  591. Column(
  592. "embedding",
  593. VECTOR(
  594. dim=3,
  595. storage_format=VectorStorageFormat.FLOAT32,
  596. storage_type=VectorStorageType.SPARSE,
  597. ),
  598. ),
  599. Column(...),
  600. ...,
  601. )
  602. Vectors can also be defined with an arbitrary number of dimensions and formats.
  603. This allows you to specify vectors of different dimensions with the various
  604. storage formats mentioned below.
  605. **Examples**
  606. * In this case, the storage format is flexible, allowing any vector type data to be
  607. inserted, such as INT8 or BINARY etc::
  608. vector_col: Mapped[array.array] = mapped_column(VECTOR(dim=3))
  609. * The dimension is flexible in this case, meaning that any dimension vector can
  610. be used::
  611. vector_col: Mapped[array.array] = mapped_column(
612. VECTOR(storage_format=VectorStorageFormat.INT8)
  613. )
  614. * Both the dimensions and the storage format are flexible. It creates a DENSE vector::
  615. vector_col: Mapped[array.array] = mapped_column(VECTOR)
  616. * To create a SPARSE vector with both dimensions and the storage format as flexible,
  617. use the :attr:`.VectorStorageType.SPARSE` storage type::
  618. vector_col: Mapped[array.array] = mapped_column(
  619. VECTOR(storage_type=VectorStorageType.SPARSE)
  620. )
  621. Python Datatypes for VECTOR
  622. ~~~~~~~~~~~~~~~~~~~~~~~~~~~
623. VECTOR data can be inserted using Python lists or Python ``array.array()`` objects.
  624. Python arrays of type FLOAT (32-bit), DOUBLE (64-bit), INT (8-bit signed integers),
  625. or BINARY (8-bit unsigned integers) are used as bind values when inserting
  626. VECTOR columns::
  627. from sqlalchemy import insert, select
  628. with engine.begin() as conn:
  629. conn.execute(
  630. insert(t1),
  631. {"id": 1, "embedding": [1, 2, 3]},
  632. )
  633. Data can be inserted into a sparse vector using the :class:`_oracle.SparseVector`
  634. class, creating an object consisting of the number of dimensions, an array of indices, and a
  635. corresponding array of values::
636. import array
from sqlalchemy import insert, select
  637. from sqlalchemy.dialects.oracle import SparseVector
  638. sparse_val = SparseVector(10, [1, 2], array.array("d", [23.45, 221.22]))
  639. with engine.begin() as conn:
  640. conn.execute(
  641. insert(t1),
  642. {"id": 1, "embedding": sparse_val},
  643. )
  644. VECTOR Indexes
  645. ~~~~~~~~~~~~~~
  646. The VECTOR feature supports an Oracle-specific parameter ``oracle_vector``
  647. on the :class:`.Index` construct, which allows the construction of VECTOR
  648. indexes.
  649. SPARSE vectors cannot be used in the creation of vector indexes.
  650. To utilize VECTOR indexing, set the ``oracle_vector`` parameter to True to use
  651. the default values provided by Oracle. HNSW is the default indexing method::
  652. from sqlalchemy import Index
  653. Index(
  654. "vector_index",
  655. t1.c.embedding,
  656. oracle_vector=True,
  657. )
658. The full range of parameters for vector indexes is available by using the
  659. :class:`.VectorIndexConfig` dataclass in place of a boolean; this dataclass
  660. allows full configuration of the index::
661. from sqlalchemy.dialects.oracle import VectorDistanceType, VectorIndexConfig, VectorIndexType
Index(
  662. "hnsw_vector_index",
  663. t1.c.embedding,
  664. oracle_vector=VectorIndexConfig(
  665. index_type=VectorIndexType.HNSW,
  666. distance=VectorDistanceType.COSINE,
  667. accuracy=90,
  668. hnsw_neighbors=5,
  669. hnsw_efconstruction=20,
  670. parallel=10,
  671. ),
  672. )
  673. Index(
  674. "ivf_vector_index",
  675. t1.c.embedding,
  676. oracle_vector=VectorIndexConfig(
  677. index_type=VectorIndexType.IVF,
  678. distance=VectorDistanceType.DOT,
  679. accuracy=90,
  680. ivf_neighbor_partitions=5,
  681. ),
  682. )
683. For a complete explanation of these parameters, see the Oracle documentation linked
  684. below.
  685. .. seealso::
  686. `CREATE VECTOR INDEX <https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=GUID-B396C369-54BB-4098-A0DD-7C54B3A0D66F>`_ - in the Oracle documentation
  687. Similarity Searching
  688. ~~~~~~~~~~~~~~~~~~~~
  689. When using the :class:`_oracle.VECTOR` datatype with a :class:`.Column` or similar
  690. ORM mapped construct, additional comparison functions are available, including:
  691. * ``l2_distance``
  692. * ``cosine_distance``
  693. * ``inner_product``
  694. Example Usage::
695. results = connection.execute(
696. select(t1).order_by(t1.c.embedding.l2_distance([2, 3, 4])).limit(3)
697. )
698. for row in results:
699. print(row.id, row.embedding)
  700. FETCH APPROXIMATE support
  701. ~~~~~~~~~~~~~~~~~~~~~~~~~
  702. Approximate vector search can only be performed when all syntax and semantic
  703. rules are satisfied, the corresponding vector index is available, and the
  704. query optimizer determines to perform it. If any of these conditions are
  705. unmet, then an approximate search is not performed. In this case the query
  706. returns exact results.
  707. To enable approximate searching during similarity searches on VECTORS, the
  708. ``oracle_fetch_approximate`` parameter may be used with the :meth:`.Select.fetch`
  709. clause to add ``FETCH APPROX`` to the SELECT statement::
  710. select(users_table).fetch(5, oracle_fetch_approximate=True)
  711. """ # noqa
  712. from __future__ import annotations
  713. from collections import defaultdict
  714. from dataclasses import fields
  715. from functools import lru_cache
  716. from functools import wraps
  717. import re
  718. from . import dictionary
  719. from .types import _OracleBoolean
  720. from .types import _OracleDate
  721. from .types import BFILE
  722. from .types import BINARY_DOUBLE
  723. from .types import BINARY_FLOAT
  724. from .types import DATE
  725. from .types import FLOAT
  726. from .types import INTERVAL
  727. from .types import LONG
  728. from .types import NCLOB
  729. from .types import NUMBER
  730. from .types import NVARCHAR2 # noqa
  731. from .types import OracleRaw # noqa
  732. from .types import RAW
  733. from .types import ROWID # noqa
  734. from .types import TIMESTAMP
  735. from .types import VARCHAR2 # noqa
  736. from .vector import VECTOR
  737. from .vector import VectorIndexConfig
  738. from .vector import VectorIndexType
  739. from ... import Computed
  740. from ... import exc
  741. from ... import schema as sa_schema
  742. from ... import sql
  743. from ... import util
  744. from ...engine import default
  745. from ...engine import ObjectKind
  746. from ...engine import ObjectScope
  747. from ...engine import reflection
  748. from ...engine.reflection import ReflectionDefaults
  749. from ...sql import and_
  750. from ...sql import bindparam
  751. from ...sql import compiler
  752. from ...sql import expression
  753. from ...sql import func
  754. from ...sql import null
  755. from ...sql import or_
  756. from ...sql import select
  757. from ...sql import selectable as sa_selectable
  758. from ...sql import sqltypes
  759. from ...sql import util as sql_util
  760. from ...sql import visitors
  761. from ...sql.visitors import InternalTraversal
  762. from ...types import BLOB
  763. from ...types import CHAR
  764. from ...types import CLOB
  765. from ...types import DOUBLE_PRECISION
  766. from ...types import INTEGER
  767. from ...types import NCHAR
  768. from ...types import NVARCHAR
  769. from ...types import REAL
  770. from ...types import VARCHAR
  771. RESERVED_WORDS = set(
  772. "SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN "
  773. "DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED "
  774. "ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE "
  775. "ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE "
  776. "BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES "
  777. "AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS "
  778. "NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER "
  779. "CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR "
  780. "DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL".split()
  781. )
  782. NO_ARG_FNS = set(
  783. "UID CURRENT_DATE SYSDATE USER CURRENT_TIME CURRENT_TIMESTAMP".split()
  784. )
  785. colspecs = {
  786. sqltypes.Boolean: _OracleBoolean,
  787. sqltypes.Interval: INTERVAL,
  788. sqltypes.DateTime: DATE,
  789. sqltypes.Date: _OracleDate,
  790. }
  791. ischema_names = {
  792. "VARCHAR2": VARCHAR,
  793. "NVARCHAR2": NVARCHAR,
  794. "CHAR": CHAR,
  795. "NCHAR": NCHAR,
  796. "DATE": DATE,
  797. "NUMBER": NUMBER,
  798. "BLOB": BLOB,
  799. "BFILE": BFILE,
  800. "CLOB": CLOB,
  801. "NCLOB": NCLOB,
  802. "TIMESTAMP": TIMESTAMP,
  803. "TIMESTAMP WITH TIME ZONE": TIMESTAMP,
  804. "TIMESTAMP WITH LOCAL TIME ZONE": TIMESTAMP,
  805. "INTERVAL DAY TO SECOND": INTERVAL,
  806. "RAW": RAW,
  807. "FLOAT": FLOAT,
  808. "DOUBLE PRECISION": DOUBLE_PRECISION,
  809. "REAL": REAL,
  810. "LONG": LONG,
  811. "BINARY_DOUBLE": BINARY_DOUBLE,
  812. "BINARY_FLOAT": BINARY_FLOAT,
  813. "ROWID": ROWID,
  814. "VECTOR": VECTOR,
  815. }
  816. class OracleTypeCompiler(compiler.GenericTypeCompiler):
  817. # Note:
  818. # Oracle DATE == DATETIME
  819. # Oracle does not allow milliseconds in DATE
  820. # Oracle does not support TIME columns
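# Illustrative sketch (assuming the default dialect flags) of how a
# few generic types render through this compiler:
#   DateTime()    -> DATE
#   Unicode(30)   -> VARCHAR2(30 CHAR)  (NVARCHAR2(30) when
#                    use_nchar_for_unicode=True)
#   LargeBinary() -> BLOB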
  821. def visit_datetime(self, type_, **kw):
  822. return self.visit_DATE(type_, **kw)
  823. def visit_float(self, type_, **kw):
  824. return self.visit_FLOAT(type_, **kw)
  825. def visit_double(self, type_, **kw):
  826. return self.visit_DOUBLE_PRECISION(type_, **kw)
  827. def visit_unicode(self, type_, **kw):
  828. if self.dialect._use_nchar_for_unicode:
  829. return self.visit_NVARCHAR2(type_, **kw)
  830. else:
  831. return self.visit_VARCHAR2(type_, **kw)
  832. def visit_INTERVAL(self, type_, **kw):
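# e.g. (illustrative) INTERVAL(day_precision=2, second_precision=6)
# renders as "INTERVAL DAY(2) TO SECOND(6)"; with no precisions set it
# is plain "INTERVAL DAY TO SECOND"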
  833. return "INTERVAL DAY%s TO SECOND%s" % (
  834. type_.day_precision is not None
  835. and "(%d)" % type_.day_precision
  836. or "",
  837. type_.second_precision is not None
  838. and "(%d)" % type_.second_precision
  839. or "",
  840. )
  841. def visit_LONG(self, type_, **kw):
  842. return "LONG"
  843. def visit_TIMESTAMP(self, type_, **kw):
  844. if getattr(type_, "local_timezone", False):
  845. return "TIMESTAMP WITH LOCAL TIME ZONE"
  846. elif type_.timezone:
  847. return "TIMESTAMP WITH TIME ZONE"
  848. else:
  849. return "TIMESTAMP"
  850. def visit_DOUBLE_PRECISION(self, type_, **kw):
  851. return self._generate_numeric(type_, "DOUBLE PRECISION", **kw)
  852. def visit_BINARY_DOUBLE(self, type_, **kw):
  853. return self._generate_numeric(type_, "BINARY_DOUBLE", **kw)
  854. def visit_BINARY_FLOAT(self, type_, **kw):
  855. return self._generate_numeric(type_, "BINARY_FLOAT", **kw)
  856. def visit_FLOAT(self, type_, **kw):
  857. kw["_requires_binary_precision"] = True
  858. return self._generate_numeric(type_, "FLOAT", **kw)
  859. def visit_NUMBER(self, type_, **kw):
  860. return self._generate_numeric(type_, "NUMBER", **kw)
  861. def _generate_numeric(
  862. self,
  863. type_,
  864. name,
  865. precision=None,
  866. scale=None,
  867. _requires_binary_precision=False,
  868. **kw,
  869. ):
  870. if precision is None:
  871. precision = getattr(type_, "precision", None)
  872. if _requires_binary_precision:
  873. binary_precision = getattr(type_, "binary_precision", None)
  874. if precision and binary_precision is None:
  875. # https://www.oracletutorial.com/oracle-basics/oracle-float/
  876. estimated_binary_precision = int(precision / 0.30103)
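# 0.30103 is approximately log10(2): N binary digits hold roughly
# N * log10(2) decimal digits, so precision / 0.30103 estimates the
# equivalent binary precision, e.g. 5 decimal digits -> 16 binary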
  877. raise exc.ArgumentError(
  878. "Oracle Database FLOAT types use 'binary precision', "
  879. "which does not convert cleanly from decimal "
  880. "'precision'. Please specify "
  881. "this type with a separate Oracle Database variant, such "
  882. f"as {type_.__class__.__name__}(precision={precision})."
  883. f"with_variant(oracle.FLOAT"
  884. f"(binary_precision="
  885. f"{estimated_binary_precision}), 'oracle'), so that the "
  886. "Oracle Database specific 'binary_precision' may be "
  887. "specified accurately."
  888. )
  889. else:
  890. precision = binary_precision
  891. if scale is None:
  892. scale = getattr(type_, "scale", None)
  893. if precision is None:
  894. return name
  895. elif scale is None:
  896. n = "%(name)s(%(precision)s)"
  897. return n % {"name": name, "precision": precision}
  898. else:
  899. n = "%(name)s(%(precision)s, %(scale)s)"
  900. return n % {"name": name, "precision": precision, "scale": scale}
  901. def visit_string(self, type_, **kw):
  902. return self.visit_VARCHAR2(type_, **kw)
  903. def visit_VARCHAR2(self, type_, **kw):
  904. return self._visit_varchar(type_, "", "2")
  905. def visit_NVARCHAR2(self, type_, **kw):
  906. return self._visit_varchar(type_, "N", "2")
  907. visit_NVARCHAR = visit_NVARCHAR2
  908. def visit_VARCHAR(self, type_, **kw):
  909. return self._visit_varchar(type_, "", "")
  910. def _visit_varchar(self, type_, n, num):
  911. if not type_.length:
  912. return "%(n)sVARCHAR%(two)s" % {"two": num, "n": n}
  913. elif not n and self.dialect._supports_char_length:
  914. varchar = "VARCHAR%(two)s(%(length)s CHAR)"
  915. return varchar % {"length": type_.length, "two": num}
  916. else:
  917. varchar = "%(n)sVARCHAR%(two)s(%(length)s)"
  918. return varchar % {"length": type_.length, "two": num, "n": n}
  919. def visit_text(self, type_, **kw):
  920. return self.visit_CLOB(type_, **kw)
  921. def visit_unicode_text(self, type_, **kw):
  922. if self.dialect._use_nchar_for_unicode:
  923. return self.visit_NCLOB(type_, **kw)
  924. else:
  925. return self.visit_CLOB(type_, **kw)
  926. def visit_large_binary(self, type_, **kw):
  927. return self.visit_BLOB(type_, **kw)
  928. def visit_big_integer(self, type_, **kw):
  929. return self.visit_NUMBER(type_, precision=19, **kw)
  930. def visit_boolean(self, type_, **kw):
  931. return self.visit_SMALLINT(type_, **kw)
  932. def visit_RAW(self, type_, **kw):
  933. if type_.length:
  934. return "RAW(%(length)s)" % {"length": type_.length}
  935. else:
  936. return "RAW"
  937. def visit_ROWID(self, type_, **kw):
  938. return "ROWID"
  939. def visit_VECTOR(self, type_, **kw):
  940. dim = type_.dim if type_.dim is not None else "*"
  941. storage_format = (
  942. type_.storage_format.value
  943. if type_.storage_format is not None
  944. else "*"
  945. )
  946. storage_type = (
  947. type_.storage_type.value if type_.storage_type is not None else "*"
  948. )
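# e.g. a VECTOR with dim=3 and no storage options renders as
# "VECTOR(3,*,*)"; any unspecified part is rendered as "*"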
  949. return f"VECTOR({dim},{storage_format},{storage_type})"
  950. class OracleCompiler(compiler.SQLCompiler):
  951. """Oracle compiler modifies the lexical structure of Select
  952. statements to work under non-ANSI configured Oracle databases, if
  953. the use_ansi flag is False.
  954. """
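# Illustrative sketch of the non-ANSI rewriting (use_ansi=False),
# assuming hypothetical tables "a" and "b":
#
#   select(a).join(b, a.c.id == b.c.a_id, isouter=True)
#
# is rendered roughly as
#
#   SELECT ... FROM a, b WHERE a.id = b.a_id(+)
#
# i.e. joins become comma-separated FROM entries and the (+) operator
# is applied to the columns of the right-hand table of an outer join.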
  955. compound_keywords = util.update_copy(
  956. compiler.SQLCompiler.compound_keywords,
  957. {expression.CompoundSelect.EXCEPT: "MINUS"},
  958. )
  959. def __init__(self, *args, **kwargs):
  960. self.__wheres = {}
  961. super().__init__(*args, **kwargs)
  962. def visit_mod_binary(self, binary, operator, **kw):
  963. return "mod(%s, %s)" % (
  964. self.process(binary.left, **kw),
  965. self.process(binary.right, **kw),
  966. )
  967. def visit_now_func(self, fn, **kw):
  968. return "CURRENT_TIMESTAMP"
  969. def visit_char_length_func(self, fn, **kw):
  970. return "LENGTH" + self.function_argspec(fn, **kw)
  971. def visit_match_op_binary(self, binary, operator, **kw):
  972. return "CONTAINS (%s, %s)" % (
  973. self.process(binary.left),
  974. self.process(binary.right),
  975. )
  976. def visit_true(self, expr, **kw):
  977. return "1"
  978. def visit_false(self, expr, **kw):
  979. return "0"
  980. def get_cte_preamble(self, recursive):
  981. return "WITH"
  982. def get_select_hint_text(self, byfroms):
  983. return " ".join("/*+ %s */" % text for table, text in byfroms.items())
  984. def function_argspec(self, fn, **kw):
  985. if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS:
  986. return compiler.SQLCompiler.function_argspec(self, fn, **kw)
  987. else:
  988. return ""
  989. def visit_function(self, func, **kw):
  990. text = super().visit_function(func, **kw)
  991. if kw.get("asfrom", False) and func.name.lower() != "table":
  992. text = "TABLE (%s)" % text
  993. return text
  994. def visit_table_valued_column(self, element, **kw):
  995. text = super().visit_table_valued_column(element, **kw)
  996. text = text + ".COLUMN_VALUE"
  997. return text
  998. def default_from(self):
  999. """Called when a ``SELECT`` statement has no froms,
  1000. and no ``FROM`` clause is to be appended.
The Oracle compiler tacks "FROM DUAL" onto the statement.
  1002. """
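# e.g. a select with no FROM objects, such as select(func.sysdate()),
# renders roughly as "SELECT sysdate FROM DUAL" (SYSDATE being one of
# the NO_ARG_FNS above)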
  1003. return " FROM DUAL"
  1004. def visit_join(self, join, from_linter=None, **kwargs):
  1005. if self.dialect.use_ansi:
  1006. return compiler.SQLCompiler.visit_join(
  1007. self, join, from_linter=from_linter, **kwargs
  1008. )
  1009. else:
  1010. if from_linter:
  1011. from_linter.edges.add((join.left, join.right))
  1012. kwargs["asfrom"] = True
  1013. if isinstance(join.right, expression.FromGrouping):
  1014. right = join.right.element
  1015. else:
  1016. right = join.right
  1017. return (
  1018. self.process(join.left, from_linter=from_linter, **kwargs)
  1019. + ", "
  1020. + self.process(right, from_linter=from_linter, **kwargs)
  1021. )
  1022. def _get_nonansi_join_whereclause(self, froms):
  1023. clauses = []
  1024. def visit_join(join):
  1025. if join.isouter:
  1026. # https://docs.oracle.com/database/121/SQLRF/queries006.htm#SQLRF52354
  1027. # "apply the outer join operator (+) to all columns of B in
  1028. # the join condition in the WHERE clause" - that is,
  1029. # unconditionally regardless of operator or the other side
  1030. def visit_binary(binary):
  1031. if isinstance(
  1032. binary.left, expression.ColumnClause
  1033. ) and join.right.is_derived_from(binary.left.table):
  1034. binary.left = _OuterJoinColumn(binary.left)
  1035. elif isinstance(
  1036. binary.right, expression.ColumnClause
  1037. ) and join.right.is_derived_from(binary.right.table):
  1038. binary.right = _OuterJoinColumn(binary.right)
  1039. clauses.append(
  1040. visitors.cloned_traverse(
  1041. join.onclause, {}, {"binary": visit_binary}
  1042. )
  1043. )
  1044. else:
  1045. clauses.append(join.onclause)
  1046. for j in join.left, join.right:
  1047. if isinstance(j, expression.Join):
  1048. visit_join(j)
  1049. elif isinstance(j, expression.FromGrouping):
  1050. visit_join(j.element)
  1051. for f in froms:
  1052. if isinstance(f, expression.Join):
  1053. visit_join(f)
  1054. if not clauses:
  1055. return None
  1056. else:
  1057. return sql.and_(*clauses)
  1058. def visit_outer_join_column(self, vc, **kw):
  1059. return self.process(vc.column, **kw) + "(+)"
  1060. def visit_sequence(self, seq, **kw):
  1061. return self.preparer.format_sequence(seq) + ".nextval"
  1062. def get_render_as_alias_suffix(self, alias_name_text):
  1063. """Oracle doesn't like ``FROM table AS alias``"""
  1064. return " " + alias_name_text
  1065. def returning_clause(
  1066. self, stmt, returning_cols, *, populate_result_map, **kw
  1067. ):
  1068. columns = []
  1069. binds = []
  1070. for i, column in enumerate(
  1071. expression._select_iterables(returning_cols)
  1072. ):
  1073. if (
  1074. self.isupdate
  1075. and isinstance(column, sa_schema.Column)
  1076. and isinstance(column.server_default, Computed)
  1077. and not self.dialect._supports_update_returning_computed_cols
  1078. ):
  1079. util.warn(
  1080. "Computed columns don't work with Oracle Database UPDATE "
  1081. "statements that use RETURNING; the value of the column "
  1082. "*before* the UPDATE takes place is returned. It is "
  1083. "advised to not use RETURNING with an Oracle Database "
  1084. "computed column. Consider setting implicit_returning "
  1085. "to False on the Table object in order to avoid implicit "
  1086. "RETURNING clauses from being generated for this Table."
  1087. )
  1088. if column.type._has_column_expression:
  1089. col_expr = column.type.column_expression(column)
  1090. else:
  1091. col_expr = column
  1092. outparam = sql.outparam("ret_%d" % i, type_=column.type)
  1093. self.binds[outparam.key] = outparam
  1094. binds.append(
  1095. self.bindparam_string(self._truncate_bindparam(outparam))
  1096. )
  1097. # has_out_parameters would in a normal case be set to True
  1098. # as a result of the compiler visiting an outparam() object.
  1099. # in this case, the above outparam() objects are not being
  1100. # visited. Ensure the statement itself didn't have other
  1101. # outparam() objects independently.
  1102. # technically, this could be supported, but as it would be
  1103. # a very strange use case without a clear rationale, disallow it
  1104. if self.has_out_parameters:
  1105. raise exc.InvalidRequestError(
  1106. "Using explicit outparam() objects with "
  1107. "UpdateBase.returning() in the same Core DML statement "
  1108. "is not supported in the Oracle Database dialects."
  1109. )
  1110. self._oracle_returning = True
  1111. columns.append(self.process(col_expr, within_columns_clause=False))
  1112. if populate_result_map:
  1113. self._add_to_result_map(
  1114. getattr(col_expr, "name", col_expr._anon_name_label),
  1115. getattr(col_expr, "name", col_expr._anon_name_label),
  1116. (
  1117. column,
  1118. getattr(column, "name", None),
  1119. getattr(column, "key", None),
  1120. ),
  1121. column.type,
  1122. )
  1123. return "RETURNING " + ", ".join(columns) + " INTO " + ", ".join(binds)
  1124. def _row_limit_clause(self, select, **kw):
"""Oracle Database 12c supports OFFSET / FETCH operators.
Use them instead of a subquery with row_number().
  1127. """
  1128. if (
  1129. select._fetch_clause is not None
  1130. or not self.dialect._supports_offset_fetch
  1131. ):
  1132. return super()._row_limit_clause(
  1133. select, use_literal_execute_for_simple_int=True, **kw
  1134. )
  1135. else:
  1136. return self.fetch_clause(
  1137. select,
  1138. fetch_clause=self._get_limit_or_fetch(select),
  1139. use_literal_execute_for_simple_int=True,
  1140. **kw,
  1141. )
  1142. def _get_limit_or_fetch(self, select):
  1143. if select._fetch_clause is None:
  1144. return select._limit_clause
  1145. else:
  1146. return select._fetch_clause
  1147. def fetch_clause(
  1148. self,
  1149. select,
  1150. fetch_clause=None,
  1151. require_offset=False,
  1152. use_literal_execute_for_simple_int=False,
  1153. **kw,
  1154. ):
  1155. text = super().fetch_clause(
  1156. select,
  1157. fetch_clause=fetch_clause,
  1158. require_offset=require_offset,
  1159. use_literal_execute_for_simple_int=(
  1160. use_literal_execute_for_simple_int
  1161. ),
  1162. **kw,
  1163. )
  1164. if select.dialect_options["oracle"]["fetch_approximate"]:
  1165. text = re.sub("FETCH FIRST", "FETCH APPROX FIRST", text)
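# e.g. select(t).fetch(5, oracle_fetch_approximate=True), for a
# hypothetical table t, renders its row-limiting clause roughly as
# "FETCH APPROX FIRST 5 ROWS ONLY" rather than "FETCH FIRST 5 ROWS ONLY"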
  1166. return text
  1167. def translate_select_structure(self, select_stmt, **kwargs):
  1168. select = select_stmt
  1169. if not getattr(select, "_oracle_visit", None):
  1170. if not self.dialect.use_ansi:
  1171. froms = self._display_froms_for_select(
  1172. select, kwargs.get("asfrom", False)
  1173. )
  1174. whereclause = self._get_nonansi_join_whereclause(froms)
  1175. if whereclause is not None:
  1176. select = select.where(whereclause)
  1177. select._oracle_visit = True
  1178. # if fetch is used this is not needed
  1179. if (
  1180. select._has_row_limiting_clause
  1181. and not self.dialect._supports_offset_fetch
  1182. and select._fetch_clause is None
  1183. ):
  1184. limit_clause = select._limit_clause
  1185. offset_clause = select._offset_clause
  1186. if select._simple_int_clause(limit_clause):
  1187. limit_clause = limit_clause.render_literal_execute()
  1188. if select._simple_int_clause(offset_clause):
  1189. offset_clause = offset_clause.render_literal_execute()
  1190. # currently using form at:
  1191. # https://blogs.oracle.com/oraclemagazine/\
  1192. # on-rownum-and-limiting-results
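# illustrative sketch of that form, for select(t).limit(10).offset(5)
# on a hypothetical table t (values rendered literally post-compile);
# the emitted SQL has roughly this shape:
#
#   SELECT ... FROM (
#       SELECT ..., ROWNUM AS ora_rn
#       FROM (SELECT ... FROM t)
#       WHERE ROWNUM <= 10 + 5
#   ) WHERE ora_rn > 5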
  1193. orig_select = select
  1194. select = select._generate()
  1195. select._oracle_visit = True
  1196. # add expressions to accommodate FOR UPDATE OF
  1197. for_update = select._for_update_arg
  1198. if for_update is not None and for_update.of:
  1199. for_update = for_update._clone()
  1200. for_update._copy_internals()
  1201. for elem in for_update.of:
  1202. if not select.selected_columns.contains_column(elem):
  1203. select = select.add_columns(elem)
  1204. # Wrap the middle select and add the hint
  1205. inner_subquery = select.alias()
  1206. limitselect = sql.select(
  1207. *[
  1208. c
  1209. for c in inner_subquery.c
  1210. if orig_select.selected_columns.corresponding_column(c)
  1211. is not None
  1212. ]
  1213. )
  1214. if (
  1215. limit_clause is not None
  1216. and self.dialect.optimize_limits
  1217. and select._simple_int_clause(limit_clause)
  1218. ):
  1219. limitselect = limitselect.prefix_with(
  1220. expression.text(
  1221. "/*+ FIRST_ROWS(%s) */"
  1222. % self.process(limit_clause, **kwargs)
  1223. )
  1224. )
  1225. limitselect._oracle_visit = True
  1226. limitselect._is_wrapper = True
  1227. # add expressions to accommodate FOR UPDATE OF
  1228. if for_update is not None and for_update.of:
  1229. adapter = sql_util.ClauseAdapter(inner_subquery)
  1230. for_update.of = [
  1231. adapter.traverse(elem) for elem in for_update.of
  1232. ]
  1233. # If needed, add the limiting clause
  1234. if limit_clause is not None:
  1235. if select._simple_int_clause(limit_clause) and (
  1236. offset_clause is None
  1237. or select._simple_int_clause(offset_clause)
  1238. ):
  1239. max_row = limit_clause
  1240. if offset_clause is not None:
  1241. max_row = max_row + offset_clause
  1242. else:
  1243. max_row = limit_clause
  1244. if offset_clause is not None:
  1245. max_row = max_row + offset_clause
  1246. limitselect = limitselect.where(
  1247. sql.literal_column("ROWNUM") <= max_row
  1248. )
  1249. # If needed, add the ora_rn, and wrap again with offset.
  1250. if offset_clause is None:
  1251. limitselect._for_update_arg = for_update
  1252. select = limitselect
  1253. else:
  1254. limitselect = limitselect.add_columns(
  1255. sql.literal_column("ROWNUM").label("ora_rn")
  1256. )
  1257. limitselect._oracle_visit = True
  1258. limitselect._is_wrapper = True
  1259. if for_update is not None and for_update.of:
  1260. limitselect_cols = limitselect.selected_columns
  1261. for elem in for_update.of:
  1262. if (
  1263. limitselect_cols.corresponding_column(elem)
  1264. is None
  1265. ):
  1266. limitselect = limitselect.add_columns(elem)
  1267. limit_subquery = limitselect.alias()
  1268. origselect_cols = orig_select.selected_columns
  1269. offsetselect = sql.select(
  1270. *[
  1271. c
  1272. for c in limit_subquery.c
  1273. if origselect_cols.corresponding_column(c)
  1274. is not None
  1275. ]
  1276. )
  1277. offsetselect._oracle_visit = True
  1278. offsetselect._is_wrapper = True
  1279. if for_update is not None and for_update.of:
  1280. adapter = sql_util.ClauseAdapter(limit_subquery)
  1281. for_update.of = [
  1282. adapter.traverse(elem) for elem in for_update.of
  1283. ]
  1284. offsetselect = offsetselect.where(
  1285. sql.literal_column("ora_rn") > offset_clause
  1286. )
  1287. offsetselect._for_update_arg = for_update
  1288. select = offsetselect
  1289. return select
  1290. def limit_clause(self, select, **kw):
  1291. return ""
  1292. def visit_empty_set_expr(self, type_, **kw):
  1293. return "SELECT 1 FROM DUAL WHERE 1!=1"
  1294. def for_update_clause(self, select, **kw):
  1295. if self.is_subquery():
  1296. return ""
  1297. tmp = " FOR UPDATE"
  1298. if select._for_update_arg.of:
  1299. tmp += " OF " + ", ".join(
  1300. self.process(elem, **kw) for elem in select._for_update_arg.of
  1301. )
  1302. if select._for_update_arg.nowait:
  1303. tmp += " NOWAIT"
  1304. if select._for_update_arg.skip_locked:
  1305. tmp += " SKIP LOCKED"
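# e.g. select(t).with_for_update(of=t.c.id, nowait=True) appends
# roughly " FOR UPDATE OF id NOWAIT"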
  1306. return tmp
  1307. def visit_is_distinct_from_binary(self, binary, operator, **kw):
  1308. return "DECODE(%s, %s, 0, 1) = 1" % (
  1309. self.process(binary.left),
  1310. self.process(binary.right),
  1311. )
  1312. def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
  1313. return "DECODE(%s, %s, 0, 1) = 0" % (
  1314. self.process(binary.left),
  1315. self.process(binary.right),
  1316. )
  1317. def visit_regexp_match_op_binary(self, binary, operator, **kw):
  1318. string = self.process(binary.left, **kw)
  1319. pattern = self.process(binary.right, **kw)
  1320. flags = binary.modifiers["flags"]
  1321. if flags is None:
  1322. return "REGEXP_LIKE(%s, %s)" % (string, pattern)
  1323. else:
  1324. return "REGEXP_LIKE(%s, %s, %s)" % (
  1325. string,
  1326. pattern,
  1327. self.render_literal_value(flags, sqltypes.STRINGTYPE),
  1328. )
  1329. def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
  1330. return "NOT %s" % self.visit_regexp_match_op_binary(
  1331. binary, operator, **kw
  1332. )
  1333. def visit_regexp_replace_op_binary(self, binary, operator, **kw):
  1334. string = self.process(binary.left, **kw)
  1335. pattern_replace = self.process(binary.right, **kw)
  1336. flags = binary.modifiers["flags"]
  1337. if flags is None:
  1338. return "REGEXP_REPLACE(%s, %s)" % (
  1339. string,
  1340. pattern_replace,
  1341. )
  1342. else:
  1343. return "REGEXP_REPLACE(%s, %s, %s)" % (
  1344. string,
  1345. pattern_replace,
  1346. self.render_literal_value(flags, sqltypes.STRINGTYPE),
  1347. )
  1348. def visit_aggregate_strings_func(self, fn, **kw):
  1349. return "LISTAGG%s" % self.function_argspec(fn, **kw)
  1350. def _visit_bitwise(self, binary, fn_name, custom_right=None, **kw):
  1351. left = self.process(binary.left, **kw)
  1352. right = self.process(
  1353. custom_right if custom_right is not None else binary.right, **kw
  1354. )
  1355. return f"{fn_name}({left}, {right})"
  1356. def visit_bitwise_xor_op_binary(self, binary, operator, **kw):
  1357. return self._visit_bitwise(binary, "BITXOR", **kw)
  1358. def visit_bitwise_or_op_binary(self, binary, operator, **kw):
  1359. return self._visit_bitwise(binary, "BITOR", **kw)
  1360. def visit_bitwise_and_op_binary(self, binary, operator, **kw):
  1361. return self._visit_bitwise(binary, "BITAND", **kw)
  1362. def visit_bitwise_rshift_op_binary(self, binary, operator, **kw):
  1363. raise exc.CompileError("Cannot compile bitwise_rshift in oracle")
  1364. def visit_bitwise_lshift_op_binary(self, binary, operator, **kw):
  1365. raise exc.CompileError("Cannot compile bitwise_lshift in oracle")
  1366. def visit_bitwise_not_op_unary_operator(self, element, operator, **kw):
  1367. raise exc.CompileError("Cannot compile bitwise_not in oracle")
  1368. class OracleDDLCompiler(compiler.DDLCompiler):
  1369. def _build_vector_index_config(
  1370. self, vector_index_config: VectorIndexConfig
  1371. ) -> str:
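# Builds the trailing DDL for a VECTOR index. Illustrative sketch
# (assuming the VectorIndexConfig field names used in sql_param_name
# below): index_type=VectorIndexType.HNSW with hnsw_neighbors=32
# yields roughly
# "ORGANIZATION INMEMORY NEIGHBOR GRAPH PARAMETERS (type HNSW, neighbors 32)"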
  1372. parts = []
  1373. sql_param_name = {
  1374. "hnsw_neighbors": "neighbors",
  1375. "hnsw_efconstruction": "efconstruction",
  1376. "ivf_neighbor_partitions": "neighbor partitions",
  1377. "ivf_sample_per_partition": "sample_per_partition",
  1378. "ivf_min_vectors_per_partition": "min_vectors_per_partition",
  1379. }
  1380. if vector_index_config.index_type == VectorIndexType.HNSW:
  1381. parts.append("ORGANIZATION INMEMORY NEIGHBOR GRAPH")
  1382. elif vector_index_config.index_type == VectorIndexType.IVF:
  1383. parts.append("ORGANIZATION NEIGHBOR PARTITIONS")
  1384. if vector_index_config.distance is not None:
  1385. parts.append(f"DISTANCE {vector_index_config.distance.value}")
  1386. if vector_index_config.accuracy is not None:
  1387. parts.append(
  1388. f"WITH TARGET ACCURACY {vector_index_config.accuracy}"
  1389. )
  1390. parameters_str = [f"type {vector_index_config.index_type.name}"]
  1391. prefix = vector_index_config.index_type.name.lower() + "_"
  1392. for field in fields(vector_index_config):
  1393. if field.name.startswith(prefix):
  1394. key = sql_param_name.get(field.name)
  1395. value = getattr(vector_index_config, field.name)
  1396. if value is not None:
  1397. parameters_str.append(f"{key} {value}")
  1398. parameters_str = ", ".join(parameters_str)
  1399. parts.append(f"PARAMETERS ({parameters_str})")
  1400. if vector_index_config.parallel is not None:
  1401. parts.append(f"PARALLEL {vector_index_config.parallel}")
  1402. return " ".join(parts)
  1403. def define_constraint_cascades(self, constraint):
  1404. text = ""
  1405. if constraint.ondelete is not None:
  1406. text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
# it's only available via triggers
  1409. # https://web.archive.org/web/20090317041251/https://asktom.oracle.com/tkyte/update_cascade/index.html
  1410. if constraint.onupdate is not None:
  1411. util.warn(
  1412. "Oracle Database does not contain native UPDATE CASCADE "
  1413. "functionality - onupdates will not be rendered for foreign "
  1414. "keys. Consider using deferrable=True, initially='deferred' "
  1415. "or triggers."
  1416. )
  1417. return text
  1418. def visit_drop_table_comment(self, drop, **kw):
  1419. return "COMMENT ON TABLE %s IS ''" % self.preparer.format_table(
  1420. drop.element
  1421. )
  1422. def visit_create_index(self, create, **kw):
  1423. index = create.element
  1424. self._verify_index_table(index)
  1425. preparer = self.preparer
  1426. text = "CREATE "
  1427. if index.unique:
  1428. text += "UNIQUE "
  1429. if index.dialect_options["oracle"]["bitmap"]:
  1430. text += "BITMAP "
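# e.g. Index("ix_data", some_table.c.data, oracle_bitmap=True) emits
# roughly "CREATE BITMAP INDEX ix_data ON some_table (data)"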
  1431. vector_options = index.dialect_options["oracle"]["vector"]
  1432. if vector_options:
  1433. text += "VECTOR "
  1434. text += "INDEX %s ON %s (%s)" % (
  1435. self._prepared_index_name(index, include_schema=True),
  1436. preparer.format_table(index.table, use_schema=True),
  1437. ", ".join(
  1438. self.sql_compiler.process(
  1439. expr, include_table=False, literal_binds=True
  1440. )
  1441. for expr in index.expressions
  1442. ),
  1443. )
  1444. if index.dialect_options["oracle"]["compress"] is not False:
  1445. if index.dialect_options["oracle"]["compress"] is True:
  1446. text += " COMPRESS"
  1447. else:
  1448. text += " COMPRESS %d" % (
  1449. index.dialect_options["oracle"]["compress"]
  1450. )
  1451. if vector_options:
  1452. if vector_options is True:
  1453. vector_options = VectorIndexConfig()
  1454. text += " " + self._build_vector_index_config(vector_options)
  1455. return text
  1456. def post_create_table(self, table):
  1457. table_opts = []
  1458. opts = table.dialect_options["oracle"]
  1459. if opts["on_commit"]:
  1460. on_commit_options = opts["on_commit"].replace("_", " ").upper()
  1461. table_opts.append("\n ON COMMIT %s" % on_commit_options)
  1462. if opts["compress"]:
  1463. if opts["compress"] is True:
  1464. table_opts.append("\n COMPRESS")
  1465. else:
  1466. table_opts.append("\n COMPRESS FOR %s" % (opts["compress"]))
  1467. if opts["tablespace"]:
  1468. table_opts.append(
  1469. "\n TABLESPACE %s" % self.preparer.quote(opts["tablespace"])
  1470. )
  1471. return "".join(table_opts)
  1472. def get_identity_options(self, identity_options):
  1473. text = super().get_identity_options(identity_options)
  1474. text = text.replace("NO MINVALUE", "NOMINVALUE")
  1475. text = text.replace("NO MAXVALUE", "NOMAXVALUE")
  1476. text = text.replace("NO CYCLE", "NOCYCLE")
  1477. if identity_options.order is not None:
  1478. text += " ORDER" if identity_options.order else " NOORDER"
  1479. return text.strip()
  1480. def visit_computed_column(self, generated, **kw):
  1481. text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
  1482. generated.sqltext, include_table=False, literal_binds=True
  1483. )
  1484. if generated.persisted is True:
  1485. raise exc.CompileError(
  1486. "Oracle Database computed columns do not support 'stored' "
  1487. "persistence; set the 'persisted' flag to None or False for "
  1488. "Oracle Database support."
  1489. )
  1490. elif generated.persisted is False:
  1491. text += " VIRTUAL"
  1492. return text
  1493. def visit_identity_column(self, identity, **kw):
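# e.g. Identity(always=True, start=1) renders roughly as
# "GENERATED ALWAYS AS IDENTITY (START WITH 1)"; with always=False and
# on_null=True the prefix becomes "GENERATED BY DEFAULT ON NULL AS IDENTITY"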
  1494. if identity.always is None:
  1495. kind = ""
  1496. else:
  1497. kind = "ALWAYS" if identity.always else "BY DEFAULT"
  1498. text = "GENERATED %s" % kind
  1499. if identity.on_null:
  1500. text += " ON NULL"
  1501. text += " AS IDENTITY"
  1502. options = self.get_identity_options(identity)
  1503. if options:
  1504. text += " (%s)" % options
  1505. return text
  1506. class OracleIdentifierPreparer(compiler.IdentifierPreparer):
  1507. reserved_words = {x.lower() for x in RESERVED_WORDS}
  1508. illegal_initial_characters = {str(dig) for dig in range(0, 10)}.union(
  1509. ["_", "$"]
  1510. )
  1511. def _bindparam_requires_quotes(self, value):
"""Return True if the given bind parameter name requires quoting."""
  1513. lc_value = value.lower()
  1514. return (
  1515. lc_value in self.reserved_words
  1516. or value[0] in self.illegal_initial_characters
  1517. or not self.legal_characters.match(str(value))
  1518. )
  1519. def format_savepoint(self, savepoint):
  1520. name = savepoint.ident.lstrip("_")
  1521. return super().format_savepoint(savepoint, name)
  1522. class OracleExecutionContext(default.DefaultExecutionContext):
  1523. def fire_sequence(self, seq, type_):
  1524. return self._execute_scalar(
  1525. "SELECT "
  1526. + self.identifier_preparer.format_sequence(seq)
  1527. + ".nextval FROM DUAL",
  1528. type_,
  1529. )
  1530. def pre_exec(self):
  1531. if self.statement and "_oracle_dblink" in self.execution_options:
  1532. self.statement = self.statement.replace(
  1533. dictionary.DB_LINK_PLACEHOLDER,
  1534. self.execution_options["_oracle_dblink"],
  1535. )
  1536. class OracleDialect(default.DefaultDialect):
  1537. name = "oracle"
  1538. supports_statement_cache = True
  1539. supports_alter = True
  1540. max_identifier_length = 128
  1541. _supports_offset_fetch = True
  1542. insert_returning = True
  1543. update_returning = True
  1544. delete_returning = True
  1545. div_is_floordiv = False
  1546. supports_simple_order_by_label = False
  1547. cte_follows_insert = True
  1548. returns_native_bytes = True
  1549. supports_sequences = True
  1550. sequences_optional = False
  1551. postfetch_lastrowid = False
  1552. default_paramstyle = "named"
  1553. colspecs = colspecs
  1554. ischema_names = ischema_names
  1555. requires_name_normalize = True
  1556. supports_comments = True
  1557. supports_default_values = False
  1558. supports_default_metavalue = True
  1559. supports_empty_insert = False
  1560. supports_identity_columns = True
  1561. statement_compiler = OracleCompiler
  1562. ddl_compiler = OracleDDLCompiler
  1563. type_compiler_cls = OracleTypeCompiler
  1564. preparer = OracleIdentifierPreparer
  1565. execution_ctx_cls = OracleExecutionContext
  1566. reflection_options = ("oracle_resolve_synonyms",)
  1567. _use_nchar_for_unicode = False
  1568. construct_arguments = [
  1569. (
  1570. sa_schema.Table,
  1571. {
  1572. "resolve_synonyms": False,
  1573. "on_commit": None,
  1574. "compress": False,
  1575. "tablespace": None,
  1576. },
  1577. ),
  1578. (
  1579. sa_schema.Index,
  1580. {
  1581. "bitmap": False,
  1582. "compress": False,
  1583. "vector": False,
  1584. },
  1585. ),
  1586. (sa_selectable.Select, {"fetch_approximate": False}),
  1587. (sa_selectable.CompoundSelect, {"fetch_approximate": False}),
  1588. ]
  1589. @util.deprecated_params(
  1590. use_binds_for_limits=(
  1591. "1.4",
  1592. "The ``use_binds_for_limits`` Oracle Database dialect parameter "
  1593. "is deprecated. The dialect now renders LIMIT / OFFSET integers "
  1594. "inline in all cases using a post-compilation hook, so that the "
  1595. "value is still represented by a 'bound parameter' on the Core "
  1596. "Expression side.",
  1597. )
  1598. )
  1599. def __init__(
  1600. self,
  1601. use_ansi=True,
  1602. optimize_limits=False,
  1603. use_binds_for_limits=None,
  1604. use_nchar_for_unicode=False,
  1605. exclude_tablespaces=("SYSTEM", "SYSAUX"),
  1606. enable_offset_fetch=True,
  1607. **kwargs,
  1608. ):
  1609. default.DefaultDialect.__init__(self, **kwargs)
  1610. self._use_nchar_for_unicode = use_nchar_for_unicode
  1611. self.use_ansi = use_ansi
  1612. self.optimize_limits = optimize_limits
  1613. self.exclude_tablespaces = exclude_tablespaces
  1614. self.enable_offset_fetch = self._supports_offset_fetch = (
  1615. enable_offset_fetch
  1616. )
  1617. def initialize(self, connection):
  1618. super().initialize(connection)
  1619. # Oracle 8i has RETURNING:
  1620. # https://docs.oracle.com/cd/A87860_01/doc/index.htm
  1621. # so does Oracle8:
  1622. # https://docs.oracle.com/cd/A64702_01/doc/index.htm
  1623. if self._is_oracle_8:
  1624. self.colspecs = self.colspecs.copy()
  1625. self.colspecs.pop(sqltypes.Interval)
  1626. self.use_ansi = False
  1627. self.supports_identity_columns = self.server_version_info >= (12,)
  1628. self._supports_offset_fetch = (
  1629. self.enable_offset_fetch and self.server_version_info >= (12,)
  1630. )
  1631. def _get_effective_compat_server_version_info(self, connection):
  1632. # dialect does not need compat levels below 12.2, so don't query
  1633. # in those cases
  1634. if self.server_version_info < (12, 2):
  1635. return self.server_version_info
  1636. try:
  1637. compat = connection.exec_driver_sql(
  1638. "SELECT value FROM v$parameter WHERE name = 'compatible'"
  1639. ).scalar()
  1640. except exc.DBAPIError:
  1641. compat = None
  1642. if compat:
  1643. try:
  1644. return tuple(int(x) for x in compat.split("."))
  1645. except:
  1646. return self.server_version_info
  1647. else:
  1648. return self.server_version_info
  1649. @property
  1650. def _is_oracle_8(self):
  1651. return self.server_version_info and self.server_version_info < (9,)
  1652. @property
  1653. def _supports_table_compression(self):
  1654. return self.server_version_info and self.server_version_info >= (10, 1)
  1655. @property
  1656. def _supports_table_compress_for(self):
  1657. return self.server_version_info and self.server_version_info >= (11,)
  1658. @property
  1659. def _supports_char_length(self):
  1660. return not self._is_oracle_8
  1661. @property
  1662. def _supports_update_returning_computed_cols(self):
# on version 18 this error is no longer present while it happens on 11;
# it may also work on versions before 18
  1665. return self.server_version_info and self.server_version_info >= (18,)
  1666. @property
  1667. def _supports_except_all(self):
  1668. return self.server_version_info and self.server_version_info >= (21,)
  1669. def do_release_savepoint(self, connection, name):
  1670. # Oracle does not support RELEASE SAVEPOINT
  1671. pass
  1672. def _check_max_identifier_length(self, connection):
  1673. if self._get_effective_compat_server_version_info(connection) < (
  1674. 12,
  1675. 2,
  1676. ):
  1677. return 30
  1678. else:
  1679. # use the default
  1680. return None
  1681. def get_isolation_level_values(self, dbapi_connection):
  1682. return ["READ COMMITTED", "SERIALIZABLE"]
  1683. def get_default_isolation_level(self, dbapi_conn):
  1684. try:
  1685. return self.get_isolation_level(dbapi_conn)
  1686. except NotImplementedError:
  1687. raise
  1688. except:
  1689. return "READ COMMITTED"
  1690. def _execute_reflection(
  1691. self, connection, query, dblink, returns_long, params=None
  1692. ):
  1693. if dblink and not dblink.startswith("@"):
  1694. dblink = f"@{dblink}"
  1695. execution_options = {
  1696. # handle db links
  1697. "_oracle_dblink": dblink or "",
  1698. # override any schema translate map
  1699. "schema_translate_map": None,
  1700. }
  1701. if dblink and returns_long:
  1702. # Oracle seems to error with
  1703. # "ORA-00997: illegal use of LONG datatype" when returning
  1704. # LONG columns via a dblink in a query with bind params
# This type seems to be very hard to cast into something else,
# so it seems easier to just avoid bind params in this case and
# render the values inline (literal_execute)
  1707. def visit_bindparam(bindparam):
  1708. bindparam.literal_execute = True
  1709. query = visitors.cloned_traverse(
  1710. query, {}, {"bindparam": visit_bindparam}
  1711. )
  1712. return connection.execute(
  1713. query, params, execution_options=execution_options
  1714. )
  1715. @util.memoized_property
  1716. def _has_table_query(self):
  1717. # materialized views are returned by all_tables
  1718. tables = (
  1719. select(
  1720. dictionary.all_tables.c.table_name,
  1721. dictionary.all_tables.c.owner,
  1722. )
  1723. .union_all(
  1724. select(
  1725. dictionary.all_views.c.view_name.label("table_name"),
  1726. dictionary.all_views.c.owner,
  1727. )
  1728. )
  1729. .subquery("tables_and_views")
  1730. )
  1731. query = select(tables.c.table_name).where(
  1732. tables.c.table_name == bindparam("table_name"),
  1733. tables.c.owner == bindparam("owner"),
  1734. )
  1735. return query
  1736. @reflection.cache
  1737. def has_table(
  1738. self, connection, table_name, schema=None, dblink=None, **kw
  1739. ):
  1740. """Supported kw arguments are: ``dblink`` to reflect via a db link."""
  1741. self._ensure_has_table_connection(connection)
  1742. if not schema:
  1743. schema = self.default_schema_name
  1744. params = {
  1745. "table_name": self.denormalize_name(table_name),
  1746. "owner": self.denormalize_schema_name(schema),
  1747. }
  1748. cursor = self._execute_reflection(
  1749. connection,
  1750. self._has_table_query,
  1751. dblink,
  1752. returns_long=False,
  1753. params=params,
  1754. )
  1755. return bool(cursor.scalar())
  1756. @reflection.cache
  1757. def has_sequence(
  1758. self, connection, sequence_name, schema=None, dblink=None, **kw
  1759. ):
  1760. """Supported kw arguments are: ``dblink`` to reflect via a db link."""
  1761. if not schema:
  1762. schema = self.default_schema_name
  1763. query = select(dictionary.all_sequences.c.sequence_name).where(
  1764. dictionary.all_sequences.c.sequence_name
  1765. == self.denormalize_schema_name(sequence_name),
  1766. dictionary.all_sequences.c.sequence_owner
  1767. == self.denormalize_schema_name(schema),
  1768. )
  1769. cursor = self._execute_reflection(
  1770. connection, query, dblink, returns_long=False
  1771. )
  1772. return bool(cursor.scalar())
  1773. def _get_default_schema_name(self, connection):
  1774. return self.normalize_name(
  1775. connection.exec_driver_sql(
  1776. "select sys_context( 'userenv', 'current_schema' ) from dual"
  1777. ).scalar()
  1778. )
  1779. def denormalize_schema_name(self, name):
  1780. # look for quoted_name
  1781. force = getattr(name, "quote", None)
  1782. if force is None and name == "public":
  1783. # look for case insensitive, no quoting specified, "public"
  1784. return "PUBLIC"
  1785. return super().denormalize_name(name)
  1786. @reflection.flexi_cache(
  1787. ("schema", InternalTraversal.dp_string),
  1788. ("filter_names", InternalTraversal.dp_string_list),
  1789. ("dblink", InternalTraversal.dp_string),
  1790. )
  1791. def _get_synonyms(self, connection, schema, filter_names, dblink, **kw):
  1792. owner = self.denormalize_schema_name(
  1793. schema or self.default_schema_name
  1794. )
  1795. has_filter_names, params = self._prepare_filter_names(filter_names)
  1796. query = select(
  1797. dictionary.all_synonyms.c.synonym_name,
  1798. dictionary.all_synonyms.c.table_name,
  1799. dictionary.all_synonyms.c.table_owner,
  1800. dictionary.all_synonyms.c.db_link,
  1801. ).where(dictionary.all_synonyms.c.owner == owner)
  1802. if has_filter_names:
  1803. query = query.where(
  1804. dictionary.all_synonyms.c.synonym_name.in_(
  1805. params["filter_names"]
  1806. )
  1807. )
  1808. result = self._execute_reflection(
  1809. connection, query, dblink, returns_long=False
  1810. ).mappings()
  1811. return result.all()
  1812. @lru_cache()
  1813. def _all_objects_query(
  1814. self, owner, scope, kind, has_filter_names, has_mat_views
  1815. ):
  1816. query = (
  1817. select(dictionary.all_objects.c.object_name)
  1818. .select_from(dictionary.all_objects)
  1819. .where(dictionary.all_objects.c.owner == owner)
  1820. )
  1821. # NOTE: materialized views are listed in all_objects twice;
# once as MATERIALIZED VIEW and once as TABLE
  1823. if kind is ObjectKind.ANY:
# materialized views are listed also as tables so there is no
  1825. # need to add them to the in_.
  1826. query = query.where(
  1827. dictionary.all_objects.c.object_type.in_(("TABLE", "VIEW"))
  1828. )
  1829. else:
  1830. object_type = []
  1831. if ObjectKind.VIEW in kind:
  1832. object_type.append("VIEW")
  1833. if (
  1834. ObjectKind.MATERIALIZED_VIEW in kind
  1835. and ObjectKind.TABLE not in kind
  1836. ):
# materialized views are listed also as tables so there is no
  1838. # need to add them to the in_ if also selecting tables.
  1839. object_type.append("MATERIALIZED VIEW")
  1840. if ObjectKind.TABLE in kind:
  1841. object_type.append("TABLE")
  1842. if has_mat_views and ObjectKind.MATERIALIZED_VIEW not in kind:
# materialized views are listed also as tables,
  1844. # so they need to be filtered out
  1845. # EXCEPT ALL / MINUS profiles as faster than using
  1846. # NOT EXISTS or NOT IN with a subquery, but it's in
  1847. # general faster to get the mat view names and exclude
  1848. # them only when needed
  1849. query = query.where(
  1850. dictionary.all_objects.c.object_name.not_in(
  1851. bindparam("mat_views")
  1852. )
  1853. )
  1854. query = query.where(
  1855. dictionary.all_objects.c.object_type.in_(object_type)
  1856. )
  1857. # handles scope
  1858. if scope is ObjectScope.DEFAULT:
  1859. query = query.where(dictionary.all_objects.c.temporary == "N")
  1860. elif scope is ObjectScope.TEMPORARY:
  1861. query = query.where(dictionary.all_objects.c.temporary == "Y")
  1862. if has_filter_names:
  1863. query = query.where(
  1864. dictionary.all_objects.c.object_name.in_(
  1865. bindparam("filter_names")
  1866. )
  1867. )
  1868. return query
  1869. @reflection.flexi_cache(
  1870. ("schema", InternalTraversal.dp_string),
  1871. ("scope", InternalTraversal.dp_plain_obj),
  1872. ("kind", InternalTraversal.dp_plain_obj),
  1873. ("filter_names", InternalTraversal.dp_string_list),
  1874. ("dblink", InternalTraversal.dp_string),
  1875. )
  1876. def _get_all_objects(
  1877. self, connection, schema, scope, kind, filter_names, dblink, **kw
  1878. ):
  1879. owner = self.denormalize_schema_name(
  1880. schema or self.default_schema_name
  1881. )
  1882. has_filter_names, params = self._prepare_filter_names(filter_names)
  1883. has_mat_views = False
  1884. if (
  1885. ObjectKind.TABLE in kind
  1886. and ObjectKind.MATERIALIZED_VIEW not in kind
  1887. ):
  1888. # see note in _all_objects_query
  1889. mat_views = self.get_materialized_view_names(
  1890. connection, schema, dblink, _normalize=False, **kw
  1891. )
  1892. if mat_views:
  1893. params["mat_views"] = mat_views
  1894. has_mat_views = True
  1895. query = self._all_objects_query(
  1896. owner, scope, kind, has_filter_names, has_mat_views
  1897. )
  1898. result = self._execute_reflection(
  1899. connection, query, dblink, returns_long=False, params=params
  1900. ).scalars()
  1901. return result.all()
  1902. def _handle_synonyms_decorator(fn):
  1903. @wraps(fn)
  1904. def wrapper(self, *args, **kwargs):
  1905. return self._handle_synonyms(fn, *args, **kwargs)
  1906. return wrapper
  1907. def _handle_synonyms(self, fn, connection, *args, **kwargs):
  1908. if not kwargs.get("oracle_resolve_synonyms", False):
  1909. return fn(self, connection, *args, **kwargs)
  1910. original_kw = kwargs.copy()
  1911. schema = kwargs.pop("schema", None)
  1912. result = self._get_synonyms(
  1913. connection,
  1914. schema=schema,
  1915. filter_names=kwargs.pop("filter_names", None),
  1916. dblink=kwargs.pop("dblink", None),
  1917. info_cache=kwargs.get("info_cache", None),
  1918. )
  1919. dblinks_owners = defaultdict(dict)
  1920. for row in result:
  1921. key = row["db_link"], row["table_owner"]
  1922. tn = self.normalize_name(row["table_name"])
  1923. dblinks_owners[key][tn] = row["synonym_name"]
  1924. if not dblinks_owners:
  1925. # No synonym, do the plain thing
  1926. return fn(self, connection, *args, **original_kw)
  1927. data = {}
  1928. for (dblink, table_owner), mapping in dblinks_owners.items():
  1929. call_kw = {
  1930. **original_kw,
  1931. "schema": table_owner,
  1932. "dblink": self.normalize_name(dblink),
  1933. "filter_names": mapping.keys(),
  1934. }
  1935. call_result = fn(self, connection, *args, **call_kw)
  1936. for (_, tn), value in call_result:
  1937. synonym_name = self.normalize_name(mapping[tn])
  1938. data[(schema, synonym_name)] = value
  1939. return data.items()
  1940. @reflection.cache
  1941. def get_schema_names(self, connection, dblink=None, **kw):
  1942. """Supported kw arguments are: ``dblink`` to reflect via a db link."""
  1943. query = select(dictionary.all_users.c.username).order_by(
  1944. dictionary.all_users.c.username
  1945. )
  1946. result = self._execute_reflection(
  1947. connection, query, dblink, returns_long=False
  1948. ).scalars()
  1949. return [self.normalize_name(row) for row in result]
  1950. @reflection.cache
  1951. def get_table_names(self, connection, schema=None, dblink=None, **kw):
  1952. """Supported kw arguments are: ``dblink`` to reflect via a db link."""
  1953. # note that table_names() isn't loading DBLINKed or synonym'ed tables
  1954. if schema is None:
  1955. schema = self.default_schema_name
  1956. den_schema = self.denormalize_schema_name(schema)
  1957. if kw.get("oracle_resolve_synonyms", False):
  1958. tables = (
  1959. select(
  1960. dictionary.all_tables.c.table_name,
  1961. dictionary.all_tables.c.owner,
  1962. dictionary.all_tables.c.iot_name,
  1963. dictionary.all_tables.c.duration,
  1964. dictionary.all_tables.c.tablespace_name,
  1965. )
  1966. .union_all(
  1967. select(
  1968. dictionary.all_synonyms.c.synonym_name.label(
  1969. "table_name"
  1970. ),
  1971. dictionary.all_synonyms.c.owner,
  1972. dictionary.all_tables.c.iot_name,
  1973. dictionary.all_tables.c.duration,
  1974. dictionary.all_tables.c.tablespace_name,
  1975. )
  1976. .select_from(dictionary.all_tables)
  1977. .join(
  1978. dictionary.all_synonyms,
  1979. and_(
  1980. dictionary.all_tables.c.table_name
  1981. == dictionary.all_synonyms.c.table_name,
  1982. dictionary.all_tables.c.owner
  1983. == func.coalesce(
  1984. dictionary.all_synonyms.c.table_owner,
  1985. dictionary.all_synonyms.c.owner,
  1986. ),
  1987. ),
  1988. )
  1989. )
  1990. .subquery("available_tables")
  1991. )
  1992. else:
  1993. tables = dictionary.all_tables
  1994. query = select(tables.c.table_name)
  1995. if self.exclude_tablespaces:
  1996. query = query.where(
  1997. func.coalesce(
  1998. tables.c.tablespace_name, "no tablespace"
  1999. ).not_in(self.exclude_tablespaces)
  2000. )
  2001. query = query.where(
  2002. tables.c.owner == den_schema,
  2003. tables.c.iot_name.is_(null()),
  2004. tables.c.duration.is_(null()),
  2005. )
  2006. # remove materialized views
  2007. mat_query = select(
  2008. dictionary.all_mviews.c.mview_name.label("table_name")
  2009. ).where(dictionary.all_mviews.c.owner == den_schema)
  2010. query = (
  2011. query.except_all(mat_query)
  2012. if self._supports_except_all
  2013. else query.except_(mat_query)
  2014. )
  2015. result = self._execute_reflection(
  2016. connection, query, dblink, returns_long=False
  2017. ).scalars()
  2018. return [self.normalize_name(row) for row in result]
  2019. @reflection.cache
  2020. def get_temp_table_names(self, connection, dblink=None, **kw):
  2021. """Supported kw arguments are: ``dblink`` to reflect via a db link."""
  2022. schema = self.denormalize_schema_name(self.default_schema_name)
  2023. query = select(dictionary.all_tables.c.table_name)
  2024. if self.exclude_tablespaces:
  2025. query = query.where(
  2026. func.coalesce(
  2027. dictionary.all_tables.c.tablespace_name, "no tablespace"
  2028. ).not_in(self.exclude_tablespaces)
  2029. )
  2030. query = query.where(
  2031. dictionary.all_tables.c.owner == schema,
  2032. dictionary.all_tables.c.iot_name.is_(null()),
  2033. dictionary.all_tables.c.duration.is_not(null()),
  2034. )
  2035. result = self._execute_reflection(
  2036. connection, query, dblink, returns_long=False
  2037. ).scalars()
  2038. return [self.normalize_name(row) for row in result]
  2039. @reflection.cache
  2040. def get_materialized_view_names(
  2041. self, connection, schema=None, dblink=None, _normalize=True, **kw
  2042. ):
  2043. """Supported kw arguments are: ``dblink`` to reflect via a db link."""
  2044. if not schema:
  2045. schema = self.default_schema_name
  2046. query = select(dictionary.all_mviews.c.mview_name).where(
  2047. dictionary.all_mviews.c.owner
  2048. == self.denormalize_schema_name(schema)
  2049. )
  2050. result = self._execute_reflection(
  2051. connection, query, dblink, returns_long=False
  2052. ).scalars()
  2053. if _normalize:
  2054. return [self.normalize_name(row) for row in result]
  2055. else:
  2056. return result.all()
  2057. @reflection.cache
  2058. def get_view_names(self, connection, schema=None, dblink=None, **kw):
  2059. """Supported kw arguments are: ``dblink`` to reflect via a db link."""
  2060. if not schema:
  2061. schema = self.default_schema_name
  2062. query = select(dictionary.all_views.c.view_name).where(
  2063. dictionary.all_views.c.owner
  2064. == self.denormalize_schema_name(schema)
  2065. )
  2066. result = self._execute_reflection(
  2067. connection, query, dblink, returns_long=False
  2068. ).scalars()
  2069. return [self.normalize_name(row) for row in result]
  2070. @reflection.cache
  2071. def get_sequence_names(self, connection, schema=None, dblink=None, **kw):
  2072. """Supported kw arguments are: ``dblink`` to reflect via a db link."""
  2073. if not schema:
  2074. schema = self.default_schema_name
  2075. query = select(dictionary.all_sequences.c.sequence_name).where(
  2076. dictionary.all_sequences.c.sequence_owner
  2077. == self.denormalize_schema_name(schema)
  2078. )
  2079. result = self._execute_reflection(
  2080. connection, query, dblink, returns_long=False
  2081. ).scalars()
  2082. return [self.normalize_name(row) for row in result]
  2083. def _value_or_raise(self, data, table, schema):
  2084. table = self.normalize_name(str(table))
  2085. try:
  2086. return dict(data)[(schema, table)]
  2087. except KeyError:
  2088. raise exc.NoSuchTableError(
  2089. f"{schema}.{table}" if schema else table
  2090. ) from None
  2091. def _prepare_filter_names(self, filter_names):
  2092. if filter_names:
  2093. fn = [self.denormalize_name(name) for name in filter_names]
  2094. return True, {"filter_names": fn}
  2095. else:
  2096. return False, {}
  2097. @reflection.cache
  2098. def get_table_options(self, connection, table_name, schema=None, **kw):
  2099. """Supported kw arguments are: ``dblink`` to reflect via a db link;
  2100. ``oracle_resolve_synonyms`` to resolve names to synonyms
  2101. """
  2102. data = self.get_multi_table_options(
  2103. connection,
  2104. schema=schema,
  2105. filter_names=[table_name],
  2106. scope=ObjectScope.ANY,
  2107. kind=ObjectKind.ANY,
  2108. **kw,
  2109. )
  2110. return self._value_or_raise(data, table_name, schema)
  2111. @lru_cache()
  2112. def _table_options_query(
  2113. self, owner, scope, kind, has_filter_names, has_mat_views
  2114. ):
  2115. query = select(
  2116. dictionary.all_tables.c.table_name,
  2117. (
  2118. dictionary.all_tables.c.compression
  2119. if self._supports_table_compression
  2120. else sql.null().label("compression")
  2121. ),
  2122. (
  2123. dictionary.all_tables.c.compress_for
  2124. if self._supports_table_compress_for
  2125. else sql.null().label("compress_for")
  2126. ),
  2127. dictionary.all_tables.c.tablespace_name,
  2128. ).where(dictionary.all_tables.c.owner == owner)
  2129. if has_filter_names:
  2130. query = query.where(
  2131. dictionary.all_tables.c.table_name.in_(
  2132. bindparam("filter_names")
  2133. )
  2134. )
  2135. if scope is ObjectScope.DEFAULT:
  2136. query = query.where(dictionary.all_tables.c.duration.is_(null()))
  2137. elif scope is ObjectScope.TEMPORARY:
  2138. query = query.where(
  2139. dictionary.all_tables.c.duration.is_not(null())
  2140. )
  2141. if (
  2142. has_mat_views
  2143. and ObjectKind.TABLE in kind
  2144. and ObjectKind.MATERIALIZED_VIEW not in kind
  2145. ):
# can't use EXCEPT ALL / MINUS here because we don't have an
# excludable row vs. the query above
# outerjoin + where null works better on oracle 21 but 11 does
# not like it at all. This is the next best thing
  2150. query = query.where(
  2151. dictionary.all_tables.c.table_name.not_in(
  2152. bindparam("mat_views")
  2153. )
  2154. )
  2155. elif (
  2156. ObjectKind.TABLE not in kind
  2157. and ObjectKind.MATERIALIZED_VIEW in kind
  2158. ):
  2159. query = query.where(
  2160. dictionary.all_tables.c.table_name.in_(bindparam("mat_views"))
  2161. )
  2162. return query
  2163. @_handle_synonyms_decorator
  2164. def get_multi_table_options(
  2165. self,
  2166. connection,
  2167. *,
  2168. schema,
  2169. filter_names,
  2170. scope,
  2171. kind,
  2172. dblink=None,
  2173. **kw,
  2174. ):
  2175. """Supported kw arguments are: ``dblink`` to reflect via a db link;
  2176. ``oracle_resolve_synonyms`` to resolve names to synonyms
  2177. """
  2178. owner = self.denormalize_schema_name(
  2179. schema or self.default_schema_name
  2180. )
  2181. has_filter_names, params = self._prepare_filter_names(filter_names)
  2182. has_mat_views = False
  2183. if (
  2184. ObjectKind.TABLE in kind
  2185. and ObjectKind.MATERIALIZED_VIEW not in kind
  2186. ):
  2187. # see note in _table_options_query
  2188. mat_views = self.get_materialized_view_names(
  2189. connection, schema, dblink, _normalize=False, **kw
  2190. )
  2191. if mat_views:
  2192. params["mat_views"] = mat_views
  2193. has_mat_views = True
  2194. elif (
  2195. ObjectKind.TABLE not in kind
  2196. and ObjectKind.MATERIALIZED_VIEW in kind
  2197. ):
  2198. mat_views = self.get_materialized_view_names(
  2199. connection, schema, dblink, _normalize=False, **kw
  2200. )
  2201. params["mat_views"] = mat_views
  2202. options = {}
  2203. default = ReflectionDefaults.table_options
  2204. if ObjectKind.TABLE in kind or ObjectKind.MATERIALIZED_VIEW in kind:
  2205. query = self._table_options_query(
  2206. owner, scope, kind, has_filter_names, has_mat_views
  2207. )
  2208. result = self._execute_reflection(
  2209. connection, query, dblink, returns_long=False, params=params
  2210. )
  2211. for table, compression, compress_for, tablespace in result:
  2212. data = default()
  2213. if compression == "ENABLED":
  2214. data["oracle_compress"] = compress_for
  2215. if tablespace:
  2216. data["oracle_tablespace"] = tablespace
  2217. options[(schema, self.normalize_name(table))] = data
  2218. if ObjectKind.VIEW in kind and ObjectScope.DEFAULT in scope:
  2219. # add the views (no temporary views)
  2220. for view in self.get_view_names(connection, schema, dblink, **kw):
  2221. if not filter_names or view in filter_names:
  2222. options[(schema, view)] = default()
  2223. return options.items()
  2224. @reflection.cache
  2225. def get_columns(self, connection, table_name, schema=None, **kw):
  2226. """Supported kw arguments are: ``dblink`` to reflect via a db link;
  2227. ``oracle_resolve_synonyms`` to resolve names to synonyms
  2228. """
  2229. data = self.get_multi_columns(
  2230. connection,
  2231. schema=schema,
  2232. filter_names=[table_name],
  2233. scope=ObjectScope.ANY,
  2234. kind=ObjectKind.ANY,
  2235. **kw,
  2236. )
  2237. return self._value_or_raise(data, table_name, schema)
  2238. def _run_batches(
  2239. self, connection, query, dblink, returns_long, mappings, all_objects
  2240. ):
  2241. each_batch = 500
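# run the reflection query over the object names in chunks of 500
# (e.g. 1200 names -> three executions of 500, 500 and 200 names),
# presumably to keep the expanded "all_objects" IN list bounded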
  2242. batches = list(all_objects)
  2243. while batches:
  2244. batch = batches[0:each_batch]
  2245. batches[0:each_batch] = []
  2246. result = self._execute_reflection(
  2247. connection,
  2248. query,
  2249. dblink,
  2250. returns_long=returns_long,
  2251. params={"all_objects": batch},
  2252. )
  2253. if mappings:
  2254. yield from result.mappings()
  2255. else:
  2256. yield from result

    @lru_cache()
    def _column_query(self, owner):
        all_cols = dictionary.all_tab_cols
        all_comments = dictionary.all_col_comments
        all_ids = dictionary.all_tab_identity_cols

        if self.server_version_info >= (12,):
            add_cols = (
                all_cols.c.default_on_null,
                sql.case(
                    (all_ids.c.table_name.is_(None), sql.null()),
                    else_=all_ids.c.generation_type
                    + ","
                    + all_ids.c.identity_options,
                ).label("identity_options"),
            )
            join_identity_cols = True
        else:
            add_cols = (
                sql.null().label("default_on_null"),
                sql.null().label("identity_options"),
            )
            join_identity_cols = False

        # NOTE: on Oracle you cannot create tables/views without columns,
        # and a table cannot have all of its columns hidden:
        # ORA-54039: table must have at least one column that is not invisible
        # all_tab_cols returns data for tables/views/mat-views.
        # all_tab_cols does not return recycled tables

        query = (
            select(
                all_cols.c.table_name,
                all_cols.c.column_name,
                all_cols.c.data_type,
                all_cols.c.char_length,
                all_cols.c.data_precision,
                all_cols.c.data_scale,
                all_cols.c.nullable,
                all_cols.c.data_default,
                all_comments.c.comments,
                all_cols.c.virtual_column,
                *add_cols,
            ).select_from(all_cols)
            # NOTE: all_col_comments has a row for each column even if no
            # comment is present, so a join could be performed, but there
            # seems to be no difference compared to an outer join
            .outerjoin(
                all_comments,
                and_(
                    all_cols.c.table_name == all_comments.c.table_name,
                    all_cols.c.column_name == all_comments.c.column_name,
                    all_cols.c.owner == all_comments.c.owner,
                ),
            )
        )
        if join_identity_cols:
            query = query.outerjoin(
                all_ids,
                and_(
                    all_cols.c.table_name == all_ids.c.table_name,
                    all_cols.c.column_name == all_ids.c.column_name,
                    all_cols.c.owner == all_ids.c.owner,
                ),
            )

        query = query.where(
            all_cols.c.table_name.in_(bindparam("all_objects")),
            all_cols.c.hidden_column == "NO",
            all_cols.c.owner == owner,
        ).order_by(all_cols.c.table_name, all_cols.c.column_id)
        return query

    @_handle_synonyms_decorator
    def get_multi_columns(
        self,
        connection,
        *,
        schema,
        filter_names,
        scope,
        kind,
        dblink=None,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        owner = self.denormalize_schema_name(
            schema or self.default_schema_name
        )
        query = self._column_query(owner)

        if (
            filter_names
            and kind is ObjectKind.ANY
            and scope is ObjectScope.ANY
        ):
            all_objects = [self.denormalize_name(n) for n in filter_names]
        else:
            all_objects = self._get_all_objects(
                connection, schema, scope, kind, filter_names, dblink, **kw
            )

        columns = defaultdict(list)

        # all_tab_cols.data_default is LONG
        result = self._run_batches(
            connection,
            query,
            dblink,
            returns_long=True,
            mappings=True,
            all_objects=all_objects,
        )

        def maybe_int(value):
            if isinstance(value, float) and value.is_integer():
                return int(value)
            else:
                return value

        remove_size = re.compile(r"\(\d+\)")

        for row_dict in result:
            table_name = self.normalize_name(row_dict["table_name"])
            orig_colname = row_dict["column_name"]
            colname = self.normalize_name(orig_colname)
            coltype = row_dict["data_type"]
            precision = maybe_int(row_dict["data_precision"])

            if coltype == "NUMBER":
                scale = maybe_int(row_dict["data_scale"])
                if precision is None and scale == 0:
                    coltype = INTEGER()
                else:
                    coltype = NUMBER(precision, scale)
            elif coltype == "FLOAT":
                # https://docs.oracle.com/cd/B14117_01/server.101/b10758/sqlqr06.htm
                if precision == 126:
                    # The DOUBLE PRECISION datatype is a floating-point
                    # number with binary precision 126.
                    coltype = DOUBLE_PRECISION()
                elif precision == 63:
                    # The REAL datatype is a floating-point number with a
                    # binary precision of 63, or 18 decimal.
                    coltype = REAL()
                else:
                    # non standard precision
                    coltype = FLOAT(binary_precision=precision)
            elif coltype in ("VARCHAR2", "NVARCHAR2", "CHAR", "NCHAR"):
                char_length = maybe_int(row_dict["char_length"])
                coltype = self.ischema_names.get(coltype)(char_length)
            elif "WITH TIME ZONE" in coltype:
                coltype = TIMESTAMP(timezone=True)
            elif "WITH LOCAL TIME ZONE" in coltype:
                coltype = TIMESTAMP(local_timezone=True)
            else:
                coltype = re.sub(remove_size, "", coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn(
                        "Did not recognize type '%s' of column '%s'"
                        % (coltype, colname)
                    )
                    coltype = sqltypes.NULLTYPE

            default = row_dict["data_default"]
            if row_dict["virtual_column"] == "YES":
                computed = dict(sqltext=default)
                default = None
            else:
                computed = None

            identity_options = row_dict["identity_options"]
            if identity_options is not None:
                identity = self._parse_identity_options(
                    identity_options, row_dict["default_on_null"]
                )
                default = None
            else:
                identity = None

            cdict = {
                "name": colname,
                "type": coltype,
                "nullable": row_dict["nullable"] == "Y",
                "default": default,
                "comment": row_dict["comments"],
            }
            if orig_colname.lower() == orig_colname:
                cdict["quote"] = True
            if computed is not None:
                cdict["computed"] = computed
            if identity is not None:
                cdict["identity"] = identity

            columns[(schema, table_name)].append(cdict)

        # NOTE: default not needed since all tables have columns
        # default = ReflectionDefaults.columns
        # return (
        #     (key, value if value else default())
        #     for key, value in columns.items()
        # )
        return columns.items()
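
    # For orientation only: each value produced above is a list of column
    # dictionaries roughly shaped like this hypothetical entry.
    #
    #     {
    #         "name": "id",
    #         "type": NUMBER(10, 0),
    #         "nullable": False,
    #         "default": None,
    #         "comment": None,
    #         # plus optional "computed", "identity" and "quote" keys
    #     }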

    def _parse_identity_options(self, identity_options, default_on_null):
        # identity_options is a string that starts with 'ALWAYS,' or
        # 'BY DEFAULT,' and continues with
        # START WITH: 1, INCREMENT BY: 1, MAX_VALUE: 123, MIN_VALUE: 1,
        # CYCLE_FLAG: N, CACHE_SIZE: 1, ORDER_FLAG: N, SCALE_FLAG: N,
        # EXTEND_FLAG: N, SESSION_FLAG: N, KEEP_VALUE: N
        parts = [p.strip() for p in identity_options.split(",")]
        identity = {
            "always": parts[0] == "ALWAYS",
            "on_null": default_on_null == "YES",
        }

        for part in parts[1:]:
            option, value = part.split(":")
            value = value.strip()

            if "START WITH" in option:
                identity["start"] = int(value)
            elif "INCREMENT BY" in option:
                identity["increment"] = int(value)
            elif "MAX_VALUE" in option:
                identity["maxvalue"] = int(value)
            elif "MIN_VALUE" in option:
                identity["minvalue"] = int(value)
            elif "CYCLE_FLAG" in option:
                identity["cycle"] = value == "Y"
            elif "CACHE_SIZE" in option:
                identity["cache"] = int(value)
            elif "ORDER_FLAG" in option:
                identity["order"] = value == "Y"
        return identity
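
    # Worked example with made-up values: an ``identity_options`` string such
    # as
    #
    #     "ALWAYS, START WITH: 1, INCREMENT BY: 1, MAX_VALUE: 9999, "
    #     "MIN_VALUE: 1, CYCLE_FLAG: N, CACHE_SIZE: 20, ORDER_FLAG: N"
    #
    # together with ``default_on_null == "NO"`` would be parsed into
    #
    #     {
    #         "always": True, "on_null": False, "start": 1, "increment": 1,
    #         "maxvalue": 9999, "minvalue": 1, "cycle": False, "cache": 20,
    #         "order": False,
    #     }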

    @reflection.cache
    def get_table_comment(self, connection, table_name, schema=None, **kw):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        data = self.get_multi_table_comment(
            connection,
            schema=schema,
            filter_names=[table_name],
            scope=ObjectScope.ANY,
            kind=ObjectKind.ANY,
            **kw,
        )
        return self._value_or_raise(data, table_name, schema)

    @lru_cache()
    def _comment_query(self, owner, scope, kind, has_filter_names):
        # NOTE: all_tab_comments / all_mview_comments have a row for every
        # object even if they don't have comments
        queries = []
        if ObjectKind.TABLE in kind or ObjectKind.VIEW in kind:
            # all_tab_comments also returns plain views
            tbl_view = select(
                dictionary.all_tab_comments.c.table_name,
                dictionary.all_tab_comments.c.comments,
            ).where(
                dictionary.all_tab_comments.c.owner == owner,
                dictionary.all_tab_comments.c.table_name.not_like("BIN$%"),
            )
            if ObjectKind.VIEW not in kind:
                tbl_view = tbl_view.where(
                    dictionary.all_tab_comments.c.table_type == "TABLE"
                )
            elif ObjectKind.TABLE not in kind:
                tbl_view = tbl_view.where(
                    dictionary.all_tab_comments.c.table_type == "VIEW"
                )
            queries.append(tbl_view)
        if ObjectKind.MATERIALIZED_VIEW in kind:
            mat_view = select(
                dictionary.all_mview_comments.c.mview_name.label("table_name"),
                dictionary.all_mview_comments.c.comments,
            ).where(
                dictionary.all_mview_comments.c.owner == owner,
                dictionary.all_mview_comments.c.mview_name.not_like("BIN$%"),
            )
            queries.append(mat_view)
        if len(queries) == 1:
            query = queries[0]
        else:
            union = sql.union_all(*queries).subquery("tables_and_views")
            query = select(union.c.table_name, union.c.comments)

        name_col = query.selected_columns.table_name

        if scope in (ObjectScope.DEFAULT, ObjectScope.TEMPORARY):
            temp = "Y" if scope is ObjectScope.TEMPORARY else "N"
            # need distinct since materialized views are also listed
            # as tables in all_objects
            query = query.distinct().join(
                dictionary.all_objects,
                and_(
                    dictionary.all_objects.c.owner == owner,
                    dictionary.all_objects.c.object_name == name_col,
                    dictionary.all_objects.c.temporary == temp,
                ),
            )
        if has_filter_names:
            query = query.where(name_col.in_(bindparam("filter_names")))
        return query

    @_handle_synonyms_decorator
    def get_multi_table_comment(
        self,
        connection,
        *,
        schema,
        filter_names,
        scope,
        kind,
        dblink=None,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        owner = self.denormalize_schema_name(
            schema or self.default_schema_name
        )
        has_filter_names, params = self._prepare_filter_names(filter_names)
        query = self._comment_query(owner, scope, kind, has_filter_names)

        result = self._execute_reflection(
            connection, query, dblink, returns_long=False, params=params
        )
        default = ReflectionDefaults.table_comment
        # materialized views by default seem to have a comment like
        # "snapshot table for snapshot owner.mat_view_name"
        ignore_mat_view = "snapshot table for snapshot "
        return (
            (
                (schema, self.normalize_name(table)),
                (
                    {"text": comment}
                    if comment is not None
                    and not comment.startswith(ignore_mat_view)
                    else default()
                ),
            )
            for table, comment in result
        )
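
    # Sketch of the filtering above (comment text is hypothetical): a row such
    # as ("MY_MV", "snapshot table for snapshot SCOTT.MY_MV") is reported with
    # the default (no comment), while ("MY_TABLE", "a user comment") becomes
    # {"text": "a user comment"}.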

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        data = self.get_multi_indexes(
            connection,
            schema=schema,
            filter_names=[table_name],
            scope=ObjectScope.ANY,
            kind=ObjectKind.ANY,
            **kw,
        )
        return self._value_or_raise(data, table_name, schema)

    @lru_cache()
    def _index_query(self, owner):
        return (
            select(
                dictionary.all_ind_columns.c.table_name,
                dictionary.all_ind_columns.c.index_name,
                dictionary.all_ind_columns.c.column_name,
                dictionary.all_indexes.c.index_type,
                dictionary.all_indexes.c.uniqueness,
                dictionary.all_indexes.c.compression,
                dictionary.all_indexes.c.prefix_length,
                dictionary.all_ind_columns.c.descend,
                dictionary.all_ind_expressions.c.column_expression,
            )
            .select_from(dictionary.all_ind_columns)
            .join(
                dictionary.all_indexes,
                sql.and_(
                    dictionary.all_ind_columns.c.index_name
                    == dictionary.all_indexes.c.index_name,
                    dictionary.all_ind_columns.c.index_owner
                    == dictionary.all_indexes.c.owner,
                ),
            )
            .outerjoin(
                # NOTE: this adds about 20% to the query time. Using a
                # case expression with a scalar subquery only when needed,
                # on the assumption that most indexes are not expression
                # based, would be faster, but oracle does not like that with
                # LONG datatype. It errors with:
                # ORA-00997: illegal use of LONG datatype
                dictionary.all_ind_expressions,
                sql.and_(
                    dictionary.all_ind_expressions.c.index_name
                    == dictionary.all_ind_columns.c.index_name,
                    dictionary.all_ind_expressions.c.index_owner
                    == dictionary.all_ind_columns.c.index_owner,
                    dictionary.all_ind_expressions.c.column_position
                    == dictionary.all_ind_columns.c.column_position,
                ),
            )
            .where(
                dictionary.all_indexes.c.table_owner == owner,
                dictionary.all_indexes.c.table_name.in_(
                    bindparam("all_objects")
                ),
            )
            .order_by(
                dictionary.all_ind_columns.c.index_name,
                dictionary.all_ind_columns.c.column_position,
            )
        )

    @reflection.flexi_cache(
        ("schema", InternalTraversal.dp_string),
        ("dblink", InternalTraversal.dp_string),
        ("all_objects", InternalTraversal.dp_string_list),
    )
    def _get_indexes_rows(self, connection, schema, dblink, all_objects, **kw):
        owner = self.denormalize_schema_name(
            schema or self.default_schema_name
        )

        query = self._index_query(owner)

        pks = {
            row_dict["constraint_name"]
            for row_dict in self._get_all_constraint_rows(
                connection, schema, dblink, all_objects, **kw
            )
            if row_dict["constraint_type"] == "P"
        }

        # all_ind_expressions.column_expression is LONG
        result = self._run_batches(
            connection,
            query,
            dblink,
            returns_long=True,
            mappings=True,
            all_objects=all_objects,
        )

        return [
            row_dict
            for row_dict in result
            if row_dict["index_name"] not in pks
        ]

    @_handle_synonyms_decorator
    def get_multi_indexes(
        self,
        connection,
        *,
        schema,
        filter_names,
        scope,
        kind,
        dblink=None,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        all_objects = self._get_all_objects(
            connection, schema, scope, kind, filter_names, dblink, **kw
        )

        uniqueness = {"NONUNIQUE": False, "UNIQUE": True}
        enabled = {"DISABLED": False, "ENABLED": True}
        is_bitmap = {"BITMAP", "FUNCTION-BASED BITMAP"}

        indexes = defaultdict(dict)

        for row_dict in self._get_indexes_rows(
            connection, schema, dblink, all_objects, **kw
        ):
            index_name = self.normalize_name(row_dict["index_name"])
            table_name = self.normalize_name(row_dict["table_name"])
            table_indexes = indexes[(schema, table_name)]

            if index_name not in table_indexes:
                table_indexes[index_name] = index_dict = {
                    "name": index_name,
                    "column_names": [],
                    "dialect_options": {},
                    "unique": uniqueness.get(row_dict["uniqueness"], False),
                }
                do = index_dict["dialect_options"]
                if row_dict["index_type"] in is_bitmap:
                    do["oracle_bitmap"] = True
                if enabled.get(row_dict["compression"], False):
                    do["oracle_compress"] = row_dict["prefix_length"]
            else:
                index_dict = table_indexes[index_name]

            expr = row_dict["column_expression"]
            if expr is not None:
                index_dict["column_names"].append(None)
                if "expressions" in index_dict:
                    index_dict["expressions"].append(expr)
                else:
                    index_dict["expressions"] = index_dict["column_names"][:-1]
                    index_dict["expressions"].append(expr)

                if row_dict["descend"].lower() != "asc":
                    assert row_dict["descend"].lower() == "desc"
                    cs = index_dict.setdefault("column_sorting", {})
                    cs[expr] = ("desc",)
            else:
                assert row_dict["descend"].lower() == "asc"
                cn = self.normalize_name(row_dict["column_name"])
                index_dict["column_names"].append(cn)
                if "expressions" in index_dict:
                    index_dict["expressions"].append(cn)

        default = ReflectionDefaults.indexes

        return (
            (key, list(indexes[key].values()) if key in indexes else default())
            for key in (
                (schema, self.normalize_name(obj_name))
                for obj_name in all_objects
            )
        )
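
    # For orientation (hypothetical names): a non-unique function-based bitmap
    # index on (col_a, UPPER(col_b)) would be reported roughly as
    #
    #     {
    #         "name": "ix_demo",
    #         "column_names": ["col_a", None],
    #         "expressions": ["col_a", 'UPPER("COL_B")'],
    #         "unique": False,
    #         "dialect_options": {"oracle_bitmap": True},
    #     }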

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        data = self.get_multi_pk_constraint(
            connection,
            schema=schema,
            filter_names=[table_name],
            scope=ObjectScope.ANY,
            kind=ObjectKind.ANY,
            **kw,
        )
        return self._value_or_raise(data, table_name, schema)

    @lru_cache()
    def _constraint_query(self, owner):
        local = dictionary.all_cons_columns.alias("local")
        remote = dictionary.all_cons_columns.alias("remote")
        return (
            select(
                dictionary.all_constraints.c.table_name,
                dictionary.all_constraints.c.constraint_type,
                dictionary.all_constraints.c.constraint_name,
                local.c.column_name.label("local_column"),
                remote.c.table_name.label("remote_table"),
                remote.c.column_name.label("remote_column"),
                remote.c.owner.label("remote_owner"),
                dictionary.all_constraints.c.search_condition,
                dictionary.all_constraints.c.delete_rule,
            )
            .select_from(dictionary.all_constraints)
            .join(
                local,
                and_(
                    local.c.owner == dictionary.all_constraints.c.owner,
                    dictionary.all_constraints.c.constraint_name
                    == local.c.constraint_name,
                ),
            )
            .outerjoin(
                remote,
                and_(
                    dictionary.all_constraints.c.r_owner == remote.c.owner,
                    dictionary.all_constraints.c.r_constraint_name
                    == remote.c.constraint_name,
                    or_(
                        remote.c.position.is_(sql.null()),
                        local.c.position == remote.c.position,
                    ),
                ),
            )
            .where(
                dictionary.all_constraints.c.owner == owner,
                dictionary.all_constraints.c.table_name.in_(
                    bindparam("all_objects")
                ),
                dictionary.all_constraints.c.constraint_type.in_(
                    ("R", "P", "U", "C")
                ),
            )
            .order_by(
                dictionary.all_constraints.c.constraint_name, local.c.position
            )
        )

    @reflection.flexi_cache(
        ("schema", InternalTraversal.dp_string),
        ("dblink", InternalTraversal.dp_string),
        ("all_objects", InternalTraversal.dp_string_list),
    )
    def _get_all_constraint_rows(
        self, connection, schema, dblink, all_objects, **kw
    ):
        owner = self.denormalize_schema_name(
            schema or self.default_schema_name
        )
        query = self._constraint_query(owner)

        # since the result is cached a list must be created
        values = list(
            self._run_batches(
                connection,
                query,
                dblink,
                returns_long=False,
                mappings=True,
                all_objects=all_objects,
            )
        )
        return values

    @_handle_synonyms_decorator
    def get_multi_pk_constraint(
        self,
        connection,
        *,
        scope,
        schema,
        filter_names,
        kind,
        dblink=None,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        all_objects = self._get_all_objects(
            connection, schema, scope, kind, filter_names, dblink, **kw
        )

        primary_keys = defaultdict(dict)
        default = ReflectionDefaults.pk_constraint

        for row_dict in self._get_all_constraint_rows(
            connection, schema, dblink, all_objects, **kw
        ):
            if row_dict["constraint_type"] != "P":
                continue
            table_name = self.normalize_name(row_dict["table_name"])
            constraint_name = self.normalize_name(row_dict["constraint_name"])
            column_name = self.normalize_name(row_dict["local_column"])

            table_pk = primary_keys[(schema, table_name)]
            if not table_pk:
                table_pk["name"] = constraint_name
                table_pk["constrained_columns"] = [column_name]
            else:
                table_pk["constrained_columns"].append(column_name)

        return (
            (key, primary_keys[key] if key in primary_keys else default())
            for key in (
                (schema, self.normalize_name(obj_name))
                for obj_name in all_objects
            )
        )

    @reflection.cache
    def get_foreign_keys(
        self,
        connection,
        table_name,
        schema=None,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        data = self.get_multi_foreign_keys(
            connection,
            schema=schema,
            filter_names=[table_name],
            scope=ObjectScope.ANY,
            kind=ObjectKind.ANY,
            **kw,
        )
        return self._value_or_raise(data, table_name, schema)

    @_handle_synonyms_decorator
    def get_multi_foreign_keys(
        self,
        connection,
        *,
        scope,
        schema,
        filter_names,
        kind,
        dblink=None,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        all_objects = self._get_all_objects(
            connection, schema, scope, kind, filter_names, dblink, **kw
        )

        resolve_synonyms = kw.get("oracle_resolve_synonyms", False)

        owner = self.denormalize_schema_name(
            schema or self.default_schema_name
        )

        all_remote_owners = set()
        fkeys = defaultdict(dict)

        for row_dict in self._get_all_constraint_rows(
            connection, schema, dblink, all_objects, **kw
        ):
            if row_dict["constraint_type"] != "R":
                continue

            table_name = self.normalize_name(row_dict["table_name"])
            constraint_name = self.normalize_name(row_dict["constraint_name"])
            table_fkey = fkeys[(schema, table_name)]

            assert constraint_name is not None

            local_column = self.normalize_name(row_dict["local_column"])
            remote_table = self.normalize_name(row_dict["remote_table"])
            remote_column = self.normalize_name(row_dict["remote_column"])
            remote_owner_orig = row_dict["remote_owner"]
            remote_owner = self.normalize_name(remote_owner_orig)
            if remote_owner_orig is not None:
                all_remote_owners.add(remote_owner_orig)

            if remote_table is None:
                # ticket 363
                if dblink and not dblink.startswith("@"):
                    dblink = f"@{dblink}"
                util.warn(
                    "Got 'None' querying 'table_name' from "
                    f"all_cons_columns{dblink or ''} - does the user have "
                    "proper rights to the table?"
                )
                continue

            if constraint_name not in table_fkey:
                table_fkey[constraint_name] = fkey = {
                    "name": constraint_name,
                    "constrained_columns": [],
                    "referred_schema": None,
                    "referred_table": remote_table,
                    "referred_columns": [],
                    "options": {},
                }

                if resolve_synonyms:
                    # will be removed below
                    fkey["_ref_schema"] = remote_owner

                if schema is not None or remote_owner_orig != owner:
                    fkey["referred_schema"] = remote_owner

                delete_rule = row_dict["delete_rule"]
                if delete_rule != "NO ACTION":
                    fkey["options"]["ondelete"] = delete_rule
            else:
                fkey = table_fkey[constraint_name]

            fkey["constrained_columns"].append(local_column)
            fkey["referred_columns"].append(remote_column)

        if resolve_synonyms and all_remote_owners:
            query = select(
                dictionary.all_synonyms.c.owner,
                dictionary.all_synonyms.c.table_name,
                dictionary.all_synonyms.c.table_owner,
                dictionary.all_synonyms.c.synonym_name,
            ).where(dictionary.all_synonyms.c.owner.in_(all_remote_owners))

            result = self._execute_reflection(
                connection, query, dblink, returns_long=False
            ).mappings()

            remote_owners_lut = {}
            for row in result:
                synonym_owner = self.normalize_name(row["owner"])
                table_name = self.normalize_name(row["table_name"])

                remote_owners_lut[(synonym_owner, table_name)] = (
                    row["table_owner"],
                    row["synonym_name"],
                )

            empty = (None, None)
            for table_fkeys in fkeys.values():
                for table_fkey in table_fkeys.values():
                    key = (
                        table_fkey.pop("_ref_schema"),
                        table_fkey["referred_table"],
                    )

                    remote_owner, syn_name = remote_owners_lut.get(key, empty)

                    if syn_name:
                        sn = self.normalize_name(syn_name)
                        table_fkey["referred_table"] = sn

                        if schema is not None or remote_owner != owner:
                            ro = self.normalize_name(remote_owner)
                            table_fkey["referred_schema"] = ro
                        else:
                            table_fkey["referred_schema"] = None

        default = ReflectionDefaults.foreign_keys

        return (
            (key, list(fkeys[key].values()) if key in fkeys else default())
            for key in (
                (schema, self.normalize_name(obj_name))
                for obj_name in all_objects
            )
        )
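
    # Reflected foreign keys come out shaped roughly like the hypothetical
    # entry below; ``referred_schema`` stays None for same-owner references
    # when no explicit schema was requested, and ``ondelete`` is only present
    # when the rule is not "NO ACTION".
    #
    #     {
    #         "name": "fk_child_parent",
    #         "constrained_columns": ["parent_id"],
    #         "referred_schema": None,
    #         "referred_table": "parent",
    #         "referred_columns": ["id"],
    #         "options": {"ondelete": "CASCADE"},
    #     }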

    @reflection.cache
    def get_unique_constraints(
        self, connection, table_name, schema=None, **kw
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        data = self.get_multi_unique_constraints(
            connection,
            schema=schema,
            filter_names=[table_name],
            scope=ObjectScope.ANY,
            kind=ObjectKind.ANY,
            **kw,
        )
        return self._value_or_raise(data, table_name, schema)

    @_handle_synonyms_decorator
    def get_multi_unique_constraints(
        self,
        connection,
        *,
        scope,
        schema,
        filter_names,
        kind,
        dblink=None,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        all_objects = self._get_all_objects(
            connection, schema, scope, kind, filter_names, dblink, **kw
        )

        unique_cons = defaultdict(dict)

        index_names = {
            row_dict["index_name"]
            for row_dict in self._get_indexes_rows(
                connection, schema, dblink, all_objects, **kw
            )
        }

        for row_dict in self._get_all_constraint_rows(
            connection, schema, dblink, all_objects, **kw
        ):
            if row_dict["constraint_type"] != "U":
                continue
            table_name = self.normalize_name(row_dict["table_name"])
            constraint_name_orig = row_dict["constraint_name"]
            constraint_name = self.normalize_name(constraint_name_orig)
            column_name = self.normalize_name(row_dict["local_column"])
            table_uc = unique_cons[(schema, table_name)]

            assert constraint_name is not None

            if constraint_name not in table_uc:
                table_uc[constraint_name] = uc = {
                    "name": constraint_name,
                    "column_names": [],
                    "duplicates_index": (
                        constraint_name
                        if constraint_name_orig in index_names
                        else None
                    ),
                }
            else:
                uc = table_uc[constraint_name]

            uc["column_names"].append(column_name)

        default = ReflectionDefaults.unique_constraints

        return (
            (
                key,
                (
                    list(unique_cons[key].values())
                    if key in unique_cons
                    else default()
                ),
            )
            for key in (
                (schema, self.normalize_name(obj_name))
                for obj_name in all_objects
            )
        )

    @reflection.cache
    def get_view_definition(
        self,
        connection,
        view_name,
        schema=None,
        dblink=None,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        if kw.get("oracle_resolve_synonyms", False):
            synonyms = self._get_synonyms(
                connection, schema, filter_names=[view_name], dblink=dblink
            )
            if synonyms:
                assert len(synonyms) == 1
                row_dict = synonyms[0]
                dblink = self.normalize_name(row_dict["db_link"])
                schema = row_dict["table_owner"]
                view_name = row_dict["table_name"]

        name = self.denormalize_name(view_name)
        owner = self.denormalize_schema_name(
            schema or self.default_schema_name
        )
        query = (
            select(dictionary.all_views.c.text)
            .where(
                dictionary.all_views.c.view_name == name,
                dictionary.all_views.c.owner == owner,
            )
            .union_all(
                select(dictionary.all_mviews.c.query).where(
                    dictionary.all_mviews.c.mview_name == name,
                    dictionary.all_mviews.c.owner == owner,
                )
            )
        )

        rp = self._execute_reflection(
            connection, query, dblink, returns_long=False
        ).scalar()
        if rp is None:
            raise exc.NoSuchTableError(
                f"{schema}.{view_name}" if schema else view_name
            )
        else:
            return rp
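
    # Hypothetical usage sketch via the Inspector API (URL and view name are
    # made up); both plain views (all_views) and materialized views
    # (all_mviews) are covered by the UNION ALL above, and a missing name
    # raises NoSuchTableError.
    #
    #     from sqlalchemy import create_engine, inspect
    #
    #     insp = inspect(create_engine("oracle+cx_oracle://scott:tiger@dsn"))
    #     ddl_text = insp.get_view_definition("my_view")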

    @reflection.cache
    def get_check_constraints(
        self, connection, table_name, schema=None, include_all=False, **kw
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        data = self.get_multi_check_constraints(
            connection,
            schema=schema,
            filter_names=[table_name],
            scope=ObjectScope.ANY,
            include_all=include_all,
            kind=ObjectKind.ANY,
            **kw,
        )
        return self._value_or_raise(data, table_name, schema)

    @_handle_synonyms_decorator
    def get_multi_check_constraints(
        self,
        connection,
        *,
        schema,
        filter_names,
        dblink=None,
        scope,
        kind,
        include_all=False,
        **kw,
    ):
        """Supported kw arguments are: ``dblink`` to reflect via a db link;
        ``oracle_resolve_synonyms`` to resolve names to synonyms
        """
        all_objects = self._get_all_objects(
            connection, schema, scope, kind, filter_names, dblink, **kw
        )

        not_null = re.compile(r"..+?. IS NOT NULL$")

        check_constraints = defaultdict(list)

        for row_dict in self._get_all_constraint_rows(
            connection, schema, dblink, all_objects, **kw
        ):
            if row_dict["constraint_type"] != "C":
                continue
            table_name = self.normalize_name(row_dict["table_name"])
            constraint_name = self.normalize_name(row_dict["constraint_name"])
            search_condition = row_dict["search_condition"]

            table_checks = check_constraints[(schema, table_name)]
            if constraint_name is not None and (
                include_all or not not_null.match(search_condition)
            ):
                table_checks.append(
                    {"name": constraint_name, "sqltext": search_condition}
                )

        default = ReflectionDefaults.check_constraints

        return (
            (
                key,
                (
                    check_constraints[key]
                    if key in check_constraints
                    else default()
                ),
            )
            for key in (
                (schema, self.normalize_name(obj_name))
                for obj_name in all_objects
            )
        )
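
    # The ``not_null`` pattern above filters out the implicit checks Oracle
    # creates for NOT NULL columns, e.g. '"SOME_COL" IS NOT NULL', unless
    # ``include_all`` is set; an explicit check such as (hypothetically)
    # "price > 0" is always returned as
    #
    #     {"name": "ck_price_positive", "sqltext": "price > 0"}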

    def _list_dblinks(self, connection, dblink=None):
        query = select(dictionary.all_db_links.c.db_link)
        links = self._execute_reflection(
            connection, query, dblink, returns_long=False
        ).scalars()
        return [self.normalize_name(link) for link in links]


class _OuterJoinColumn(sql.ClauseElement):
    __visit_name__ = "outer_join_column"

    def __init__(self, column):
        self.column = column