Cleaned up the directories
This commit is contained in:
parent
f708506d68
commit
a683fcffea
1340 changed files with 554582 additions and 6840 deletions
|
@ -0,0 +1,67 @@
|
|||
# dialects/oracle/__init__.py
|
||||
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
|
||||
# <see AUTHORS file>
|
||||
#
|
||||
# This module is part of SQLAlchemy and is released under
|
||||
# the MIT License: https://www.opensource.org/licenses/mit-license.php
|
||||
# mypy: ignore-errors
|
||||
from types import ModuleType
|
||||
|
||||
from . import base # noqa
|
||||
from . import cx_oracle # noqa
|
||||
from . import oracledb # noqa
|
||||
from .base import BFILE
|
||||
from .base import BINARY_DOUBLE
|
||||
from .base import BINARY_FLOAT
|
||||
from .base import BLOB
|
||||
from .base import CHAR
|
||||
from .base import CLOB
|
||||
from .base import DATE
|
||||
from .base import DOUBLE_PRECISION
|
||||
from .base import FLOAT
|
||||
from .base import INTERVAL
|
||||
from .base import LONG
|
||||
from .base import NCHAR
|
||||
from .base import NCLOB
|
||||
from .base import NUMBER
|
||||
from .base import NVARCHAR
|
||||
from .base import NVARCHAR2
|
||||
from .base import RAW
|
||||
from .base import REAL
|
||||
from .base import ROWID
|
||||
from .base import TIMESTAMP
|
||||
from .base import VARCHAR
|
||||
from .base import VARCHAR2
|
||||
|
||||
# Alias oracledb also as oracledb_async
# A synthetic module-like object so that "oracle+oracledb_async://" URLs
# resolve to the asyncio variant of the oracledb dialect; it exposes only
# the "dialect" attribute that the dialect registry looks up.
oracledb_async = type(
    "oracledb_async", (ModuleType,), {"dialect": oracledb.dialect_async}
)

# cx_oracle remains the default DBAPI for plain "oracle://" URLs.
base.dialect = dialect = cx_oracle.dialect

# Public API of the oracle dialect package.
__all__ = (
    "VARCHAR",
    "NVARCHAR",
    "CHAR",
    "NCHAR",
    "DATE",
    "NUMBER",
    "BLOB",
    "BFILE",
    "CLOB",
    "NCLOB",
    "TIMESTAMP",
    "RAW",
    "FLOAT",
    "DOUBLE_PRECISION",
    "BINARY_DOUBLE",
    "BINARY_FLOAT",
    "LONG",
    "dialect",
    "INTERVAL",
    "VARCHAR2",
    "NVARCHAR2",
    "ROWID",
    "REAL",
)
|
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
@ -0,0 +1,507 @@
|
|||
# dialects/oracle/dictionary.py
|
||||
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
|
||||
# <see AUTHORS file>
|
||||
#
|
||||
# This module is part of SQLAlchemy and is released under
|
||||
# the MIT License: https://www.opensource.org/licenses/mit-license.php
|
||||
# mypy: ignore-errors
|
||||
|
||||
from .types import DATE
|
||||
from .types import LONG
|
||||
from .types import NUMBER
|
||||
from .types import RAW
|
||||
from .types import VARCHAR2
|
||||
from ... import Column
|
||||
from ... import MetaData
|
||||
from ... import Table
|
||||
from ... import table
|
||||
from ...sql.sqltypes import CHAR
|
||||
|
||||
# constants

# Placeholder appended to every dictionary table name below; it is swapped
# for the actual "@dblink" suffix (or removed) when queries are rendered.
DB_LINK_PLACEHOLDER = "__$sa_dblink$__"

# tables
dual = table("dual")
# MetaData collection holding the Oracle data-dictionary view definitions.
dictionary_meta = MetaData()

# NOTE: all the dictionary_meta are aliases because oracle does not like
# using the full table@dblink for every column in query, and complains with
# ORA-00960: ambiguous column naming in select list
||||
# ALL_TABLES data dictionary view: one row per relational table
# accessible to the current user.
all_tables = Table(
    "all_tables" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("tablespace_name", VARCHAR2(30)),
    Column("cluster_name", VARCHAR2(128)),
    Column("iot_name", VARCHAR2(128)),
    Column("status", VARCHAR2(8)),
    Column("pct_free", NUMBER),
    Column("pct_used", NUMBER),
    Column("ini_trans", NUMBER),
    Column("max_trans", NUMBER),
    Column("initial_extent", NUMBER),
    Column("next_extent", NUMBER),
    Column("min_extents", NUMBER),
    Column("max_extents", NUMBER),
    Column("pct_increase", NUMBER),
    Column("freelists", NUMBER),
    Column("freelist_groups", NUMBER),
    Column("logging", VARCHAR2(3)),
    Column("backed_up", VARCHAR2(1)),
    Column("num_rows", NUMBER),
    Column("blocks", NUMBER),
    Column("empty_blocks", NUMBER),
    Column("avg_space", NUMBER),
    Column("chain_cnt", NUMBER),
    Column("avg_row_len", NUMBER),
    Column("avg_space_freelist_blocks", NUMBER),
    Column("num_freelist_blocks", NUMBER),
    Column("degree", VARCHAR2(10)),
    Column("instances", VARCHAR2(10)),
    Column("cache", VARCHAR2(5)),
    Column("table_lock", VARCHAR2(8)),
    Column("sample_size", NUMBER),
    Column("last_analyzed", DATE),
    Column("partitioned", VARCHAR2(3)),
    Column("iot_type", VARCHAR2(12)),
    Column("temporary", VARCHAR2(1)),
    Column("secondary", VARCHAR2(1)),
    Column("nested", VARCHAR2(3)),
    Column("buffer_pool", VARCHAR2(7)),
    Column("flash_cache", VARCHAR2(7)),
    Column("cell_flash_cache", VARCHAR2(7)),
    Column("row_movement", VARCHAR2(8)),
    Column("global_stats", VARCHAR2(3)),
    Column("user_stats", VARCHAR2(3)),
    Column("duration", VARCHAR2(15)),
    Column("skip_corrupt", VARCHAR2(8)),
    Column("monitoring", VARCHAR2(3)),
    Column("cluster_owner", VARCHAR2(128)),
    Column("dependencies", VARCHAR2(8)),
    Column("compression", VARCHAR2(8)),
    Column("compress_for", VARCHAR2(30)),
    Column("dropped", VARCHAR2(3)),
    Column("read_only", VARCHAR2(3)),
    Column("segment_created", VARCHAR2(3)),
    Column("result_cache", VARCHAR2(7)),
    Column("clustering", VARCHAR2(3)),
    Column("activity_tracking", VARCHAR2(23)),
    Column("dml_timestamp", VARCHAR2(25)),
    Column("has_identity", VARCHAR2(3)),
    Column("container_data", VARCHAR2(3)),
    Column("inmemory", VARCHAR2(8)),
    Column("inmemory_priority", VARCHAR2(8)),
    Column("inmemory_distribute", VARCHAR2(15)),
    Column("inmemory_compression", VARCHAR2(17)),
    Column("inmemory_duplicate", VARCHAR2(13)),
    Column("default_collation", VARCHAR2(100)),
    Column("duplicated", VARCHAR2(1)),
    Column("sharded", VARCHAR2(1)),
    Column("externally_sharded", VARCHAR2(1)),
    Column("externally_duplicated", VARCHAR2(1)),
    Column("external", VARCHAR2(3)),
    Column("hybrid", VARCHAR2(3)),
    Column("cellmemory", VARCHAR2(24)),
    Column("containers_default", VARCHAR2(3)),
    Column("container_map", VARCHAR2(3)),
    Column("extended_data_link", VARCHAR2(3)),
    Column("extended_data_link_map", VARCHAR2(3)),
    Column("inmemory_service", VARCHAR2(12)),
    Column("inmemory_service_name", VARCHAR2(1000)),
    Column("container_map_object", VARCHAR2(3)),
    Column("memoptimize_read", VARCHAR2(8)),
    Column("memoptimize_write", VARCHAR2(8)),
    Column("has_sensitive_column", VARCHAR2(3)),
    Column("admit_null", VARCHAR2(3)),
    Column("data_link_dml_enabled", VARCHAR2(3)),
    Column("logical_replication", VARCHAR2(8)),
).alias("a_tables")
|
||||
|
||||
# ALL_VIEWS data dictionary view: views accessible to the current user.
all_views = Table(
    "all_views" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("view_name", VARCHAR2(128), nullable=False),
    Column("text_length", NUMBER),
    Column("text", LONG),
    Column("text_vc", VARCHAR2(4000)),
    Column("type_text_length", NUMBER),
    Column("type_text", VARCHAR2(4000)),
    Column("oid_text_length", NUMBER),
    Column("oid_text", VARCHAR2(4000)),
    Column("view_type_owner", VARCHAR2(128)),
    Column("view_type", VARCHAR2(128)),
    Column("superview_name", VARCHAR2(128)),
    Column("editioning_view", VARCHAR2(1)),
    Column("read_only", VARCHAR2(1)),
    Column("container_data", VARCHAR2(1)),
    Column("bequeath", VARCHAR2(12)),
    Column("origin_con_id", VARCHAR2(256)),
    Column("default_collation", VARCHAR2(100)),
    Column("containers_default", VARCHAR2(3)),
    Column("container_map", VARCHAR2(3)),
    Column("extended_data_link", VARCHAR2(3)),
    Column("extended_data_link_map", VARCHAR2(3)),
    Column("has_sensitive_column", VARCHAR2(3)),
    Column("admit_null", VARCHAR2(3)),
    Column("pdb_local_only", VARCHAR2(3)),
).alias("a_views")
|
||||
|
||||
# ALL_SEQUENCES data dictionary view: sequences accessible to the user.
all_sequences = Table(
    "all_sequences" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("sequence_owner", VARCHAR2(128), nullable=False),
    Column("sequence_name", VARCHAR2(128), nullable=False),
    Column("min_value", NUMBER),
    Column("max_value", NUMBER),
    Column("increment_by", NUMBER, nullable=False),
    Column("cycle_flag", VARCHAR2(1)),
    Column("order_flag", VARCHAR2(1)),
    Column("cache_size", NUMBER, nullable=False),
    Column("last_number", NUMBER, nullable=False),
    Column("scale_flag", VARCHAR2(1)),
    Column("extend_flag", VARCHAR2(1)),
    Column("sharded_flag", VARCHAR2(1)),
    Column("session_flag", VARCHAR2(1)),
    Column("keep_value", VARCHAR2(1)),
).alias("a_sequences")
|
||||
|
||||
# ALL_USERS data dictionary view: all users visible to the current user.
all_users = Table(
    "all_users" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("username", VARCHAR2(128), nullable=False),
    Column("user_id", NUMBER, nullable=False),
    Column("created", DATE, nullable=False),
    Column("common", VARCHAR2(3)),
    Column("oracle_maintained", VARCHAR2(1)),
    Column("inherited", VARCHAR2(3)),
    Column("default_collation", VARCHAR2(100)),
    Column("implicit", VARCHAR2(3)),
    Column("all_shard", VARCHAR2(3)),
    Column("external_shard", VARCHAR2(3)),
).alias("a_users")
|
||||
|
||||
# ALL_MVIEWS data dictionary view: materialized views accessible to the
# current user.
all_mviews = Table(
    "all_mviews" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("mview_name", VARCHAR2(128), nullable=False),
    Column("container_name", VARCHAR2(128), nullable=False),
    Column("query", LONG),
    Column("query_len", NUMBER(38)),
    Column("updatable", VARCHAR2(1)),
    Column("update_log", VARCHAR2(128)),
    Column("master_rollback_seg", VARCHAR2(128)),
    Column("master_link", VARCHAR2(128)),
    Column("rewrite_enabled", VARCHAR2(1)),
    Column("rewrite_capability", VARCHAR2(9)),
    Column("refresh_mode", VARCHAR2(6)),
    Column("refresh_method", VARCHAR2(8)),
    Column("build_mode", VARCHAR2(9)),
    Column("fast_refreshable", VARCHAR2(18)),
    Column("last_refresh_type", VARCHAR2(8)),
    Column("last_refresh_date", DATE),
    Column("last_refresh_end_time", DATE),
    Column("staleness", VARCHAR2(19)),
    Column("after_fast_refresh", VARCHAR2(19)),
    Column("unknown_prebuilt", VARCHAR2(1)),
    Column("unknown_plsql_func", VARCHAR2(1)),
    Column("unknown_external_table", VARCHAR2(1)),
    Column("unknown_consider_fresh", VARCHAR2(1)),
    Column("unknown_import", VARCHAR2(1)),
    Column("unknown_trusted_fd", VARCHAR2(1)),
    Column("compile_state", VARCHAR2(19)),
    Column("use_no_index", VARCHAR2(1)),
    Column("stale_since", DATE),
    Column("num_pct_tables", NUMBER),
    Column("num_fresh_pct_regions", NUMBER),
    Column("num_stale_pct_regions", NUMBER),
    Column("segment_created", VARCHAR2(3)),
    Column("evaluation_edition", VARCHAR2(128)),
    Column("unusable_before", VARCHAR2(128)),
    Column("unusable_beginning", VARCHAR2(128)),
    Column("default_collation", VARCHAR2(100)),
    Column("on_query_computation", VARCHAR2(1)),
    Column("auto", VARCHAR2(3)),
).alias("a_mviews")
|
||||
|
||||
# ALL_TAB_IDENTITY_COLS data dictionary view: identity columns on tables
# accessible to the current user.
all_tab_identity_cols = Table(
    "all_tab_identity_cols" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(128), nullable=False),
    Column("generation_type", VARCHAR2(10)),
    Column("sequence_name", VARCHAR2(128), nullable=False),
    Column("identity_options", VARCHAR2(298)),
).alias("a_tab_identity_cols")
|
||||
|
||||
# ALL_TAB_COLS data dictionary view: columns of tables, views and
# clusters accessible to the current user (includes hidden columns).
all_tab_cols = Table(
    "all_tab_cols" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(128), nullable=False),
    Column("data_type", VARCHAR2(128)),
    Column("data_type_mod", VARCHAR2(3)),
    Column("data_type_owner", VARCHAR2(128)),
    Column("data_length", NUMBER, nullable=False),
    Column("data_precision", NUMBER),
    Column("data_scale", NUMBER),
    Column("nullable", VARCHAR2(1)),
    Column("column_id", NUMBER),
    Column("default_length", NUMBER),
    Column("data_default", LONG),
    Column("num_distinct", NUMBER),
    Column("low_value", RAW(1000)),
    Column("high_value", RAW(1000)),
    Column("density", NUMBER),
    Column("num_nulls", NUMBER),
    Column("num_buckets", NUMBER),
    Column("last_analyzed", DATE),
    Column("sample_size", NUMBER),
    Column("character_set_name", VARCHAR2(44)),
    Column("char_col_decl_length", NUMBER),
    Column("global_stats", VARCHAR2(3)),
    Column("user_stats", VARCHAR2(3)),
    Column("avg_col_len", NUMBER),
    Column("char_length", NUMBER),
    Column("char_used", VARCHAR2(1)),
    Column("v80_fmt_image", VARCHAR2(3)),
    Column("data_upgraded", VARCHAR2(3)),
    Column("hidden_column", VARCHAR2(3)),
    Column("virtual_column", VARCHAR2(3)),
    Column("segment_column_id", NUMBER),
    Column("internal_column_id", NUMBER, nullable=False),
    Column("histogram", VARCHAR2(15)),
    Column("qualified_col_name", VARCHAR2(4000)),
    Column("user_generated", VARCHAR2(3)),
    Column("default_on_null", VARCHAR2(3)),
    Column("identity_column", VARCHAR2(3)),
    Column("evaluation_edition", VARCHAR2(128)),
    Column("unusable_before", VARCHAR2(128)),
    Column("unusable_beginning", VARCHAR2(128)),
    Column("collation", VARCHAR2(100)),
    Column("collated_column_id", NUMBER),
).alias("a_tab_cols")
|
||||
|
||||
# ALL_TAB_COMMENTS data dictionary view: comments on tables and views.
all_tab_comments = Table(
    "all_tab_comments" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("table_type", VARCHAR2(11)),
    Column("comments", VARCHAR2(4000)),
    Column("origin_con_id", NUMBER),
).alias("a_tab_comments")
|
||||
|
||||
# ALL_COL_COMMENTS data dictionary view: comments on columns.
all_col_comments = Table(
    "all_col_comments" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(128), nullable=False),
    Column("comments", VARCHAR2(4000)),
    Column("origin_con_id", NUMBER),
).alias("a_col_comments")
|
||||
|
||||
# ALL_MVIEW_COMMENTS data dictionary view: comments on materialized views.
all_mview_comments = Table(
    "all_mview_comments" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("mview_name", VARCHAR2(128), nullable=False),
    Column("comments", VARCHAR2(4000)),
).alias("a_mview_comments")
|
||||
|
||||
# ALL_IND_COLUMNS data dictionary view: columns of indexes on tables
# accessible to the current user.
all_ind_columns = Table(
    "all_ind_columns" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("index_owner", VARCHAR2(128), nullable=False),
    Column("index_name", VARCHAR2(128), nullable=False),
    Column("table_owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(4000)),
    Column("column_position", NUMBER, nullable=False),
    Column("column_length", NUMBER, nullable=False),
    Column("char_length", NUMBER),
    Column("descend", VARCHAR2(4)),
    Column("collated_column_id", NUMBER),
).alias("a_ind_columns")
|
||||
|
||||
# ALL_INDEXES data dictionary view: indexes on tables accessible to the
# current user.
all_indexes = Table(
    "all_indexes" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("index_name", VARCHAR2(128), nullable=False),
    Column("index_type", VARCHAR2(27)),
    Column("table_owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("table_type", CHAR(11)),
    Column("uniqueness", VARCHAR2(9)),
    Column("compression", VARCHAR2(13)),
    Column("prefix_length", NUMBER),
    Column("tablespace_name", VARCHAR2(30)),
    Column("ini_trans", NUMBER),
    Column("max_trans", NUMBER),
    Column("initial_extent", NUMBER),
    Column("next_extent", NUMBER),
    Column("min_extents", NUMBER),
    Column("max_extents", NUMBER),
    Column("pct_increase", NUMBER),
    Column("pct_threshold", NUMBER),
    Column("include_column", NUMBER),
    Column("freelists", NUMBER),
    Column("freelist_groups", NUMBER),
    Column("pct_free", NUMBER),
    Column("logging", VARCHAR2(3)),
    Column("blevel", NUMBER),
    Column("leaf_blocks", NUMBER),
    Column("distinct_keys", NUMBER),
    Column("avg_leaf_blocks_per_key", NUMBER),
    Column("avg_data_blocks_per_key", NUMBER),
    Column("clustering_factor", NUMBER),
    Column("status", VARCHAR2(8)),
    Column("num_rows", NUMBER),
    Column("sample_size", NUMBER),
    Column("last_analyzed", DATE),
    Column("degree", VARCHAR2(40)),
    Column("instances", VARCHAR2(40)),
    Column("partitioned", VARCHAR2(3)),
    Column("temporary", VARCHAR2(1)),
    Column("generated", VARCHAR2(1)),
    Column("secondary", VARCHAR2(1)),
    Column("buffer_pool", VARCHAR2(7)),
    Column("flash_cache", VARCHAR2(7)),
    Column("cell_flash_cache", VARCHAR2(7)),
    Column("user_stats", VARCHAR2(3)),
    Column("duration", VARCHAR2(15)),
    Column("pct_direct_access", NUMBER),
    Column("ityp_owner", VARCHAR2(128)),
    Column("ityp_name", VARCHAR2(128)),
    Column("parameters", VARCHAR2(1000)),
    Column("global_stats", VARCHAR2(3)),
    Column("domidx_status", VARCHAR2(12)),
    Column("domidx_opstatus", VARCHAR2(6)),
    Column("funcidx_status", VARCHAR2(8)),
    Column("join_index", VARCHAR2(3)),
    Column("iot_redundant_pkey_elim", VARCHAR2(3)),
    Column("dropped", VARCHAR2(3)),
    Column("visibility", VARCHAR2(9)),
    Column("domidx_management", VARCHAR2(14)),
    Column("segment_created", VARCHAR2(3)),
    Column("orphaned_entries", VARCHAR2(3)),
    Column("indexing", VARCHAR2(7)),
    Column("auto", VARCHAR2(3)),
).alias("a_indexes")
|
||||
|
||||
# ALL_IND_EXPRESSIONS data dictionary view: expressions of function-based
# indexes on tables accessible to the current user.
all_ind_expressions = Table(
    "all_ind_expressions" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("index_owner", VARCHAR2(128), nullable=False),
    Column("index_name", VARCHAR2(128), nullable=False),
    Column("table_owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_expression", LONG),
    Column("column_position", NUMBER, nullable=False),
).alias("a_ind_expressions")
|
||||
|
||||
# ALL_CONSTRAINTS data dictionary view: constraint definitions on tables
# accessible to the current user.
all_constraints = Table(
    "all_constraints" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128)),
    Column("constraint_name", VARCHAR2(128)),
    Column("constraint_type", VARCHAR2(1)),
    Column("table_name", VARCHAR2(128)),
    Column("search_condition", LONG),
    Column("search_condition_vc", VARCHAR2(4000)),
    Column("r_owner", VARCHAR2(128)),
    Column("r_constraint_name", VARCHAR2(128)),
    Column("delete_rule", VARCHAR2(9)),
    Column("status", VARCHAR2(8)),
    Column("deferrable", VARCHAR2(14)),
    Column("deferred", VARCHAR2(9)),
    Column("validated", VARCHAR2(13)),
    Column("generated", VARCHAR2(14)),
    Column("bad", VARCHAR2(3)),
    Column("rely", VARCHAR2(4)),
    Column("last_change", DATE),
    Column("index_owner", VARCHAR2(128)),
    Column("index_name", VARCHAR2(128)),
    Column("invalid", VARCHAR2(7)),
    Column("view_related", VARCHAR2(14)),
    Column("origin_con_id", VARCHAR2(256)),
).alias("a_constraints")
|
||||
|
||||
# ALL_CONS_COLUMNS data dictionary view: columns participating in
# constraints accessible to the current user.
all_cons_columns = Table(
    "all_cons_columns" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("constraint_name", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(4000)),
    Column("position", NUMBER),
).alias("a_cons_columns")
|
||||
|
||||
# TODO figure out if it's still relevant, since there is no mention from here
# https://docs.oracle.com/en/database/oracle/oracle-database/21/refrn/ALL_DB_LINKS.html
# original note:
# using user_db_links here since all_db_links appears
# to have more restricted permissions.
# https://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm
# will need to hear from more users if we are doing
# the right thing here. See [ticket:2619]
# ALL_DB_LINKS data dictionary view: database links accessible to the user.
all_db_links = Table(
    "all_db_links" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("db_link", VARCHAR2(128), nullable=False),
    Column("username", VARCHAR2(128)),
    Column("host", VARCHAR2(2000)),
    Column("created", DATE, nullable=False),
    Column("hidden", VARCHAR2(3)),
    Column("shard_internal", VARCHAR2(3)),
    Column("valid", VARCHAR2(3)),
    Column("intra_cdb", VARCHAR2(3)),
).alias("a_db_links")
|
||||
|
||||
# ALL_SYNONYMS data dictionary view: synonyms accessible to the user.
all_synonyms = Table(
    "all_synonyms" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128)),
    Column("synonym_name", VARCHAR2(128)),
    Column("table_owner", VARCHAR2(128)),
    Column("table_name", VARCHAR2(128)),
    Column("db_link", VARCHAR2(128)),
    Column("origin_con_id", VARCHAR2(256)),
).alias("a_synonyms")
|
||||
|
||||
# ALL_OBJECTS data dictionary view: all database objects accessible to
# the current user.
all_objects = Table(
    "all_objects" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("object_name", VARCHAR2(128), nullable=False),
    Column("subobject_name", VARCHAR2(128)),
    Column("object_id", NUMBER, nullable=False),
    Column("data_object_id", NUMBER),
    Column("object_type", VARCHAR2(23)),
    Column("created", DATE, nullable=False),
    Column("last_ddl_time", DATE, nullable=False),
    Column("timestamp", VARCHAR2(19)),
    Column("status", VARCHAR2(7)),
    Column("temporary", VARCHAR2(1)),
    Column("generated", VARCHAR2(1)),
    Column("secondary", VARCHAR2(1)),
    Column("namespace", NUMBER, nullable=False),
    Column("edition_name", VARCHAR2(128)),
    Column("sharing", VARCHAR2(13)),
    Column("editionable", VARCHAR2(1)),
    Column("oracle_maintained", VARCHAR2(1)),
    Column("application", VARCHAR2(1)),
    Column("default_collation", VARCHAR2(100)),
    Column("duplicated", VARCHAR2(1)),
    Column("sharded", VARCHAR2(1)),
    Column("created_appid", NUMBER),
    Column("created_vsnid", NUMBER),
    Column("modified_appid", NUMBER),
    Column("modified_vsnid", NUMBER),
).alias("a_objects")
|
|
@ -0,0 +1,311 @@
|
|||
# dialects/oracle/oracledb.py
|
||||
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
|
||||
# <see AUTHORS file>
|
||||
#
|
||||
# This module is part of SQLAlchemy and is released under
|
||||
# the MIT License: https://www.opensource.org/licenses/mit-license.php
|
||||
# mypy: ignore-errors
|
||||
|
||||
r"""
|
||||
.. dialect:: oracle+oracledb
|
||||
:name: python-oracledb
|
||||
:dbapi: oracledb
|
||||
:connectstring: oracle+oracledb://user:pass@hostname:port[/dbname][?service_name=<service>[&key=value&key=value...]]
|
||||
:url: https://oracle.github.io/python-oracledb/
|
||||
|
||||
python-oracledb is released by Oracle to supersede the cx_Oracle driver.
|
||||
It is fully compatible with cx_Oracle and features both a "thin" client
|
||||
mode that requires no dependencies, as well as a "thick" mode that uses
|
||||
the Oracle Client Interface in the same way as cx_Oracle.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:ref:`cx_oracle` - all of cx_Oracle's notes apply to the oracledb driver
|
||||
as well.
|
||||
|
||||
The SQLAlchemy ``oracledb`` dialect provides both a sync and an async
|
||||
implementation under the same dialect name. The proper version is
|
||||
selected depending on how the engine is created:
|
||||
|
||||
* calling :func:`_sa.create_engine` with ``oracle+oracledb://...`` will
|
||||
automatically select the sync version, e.g.::
|
||||
|
||||
from sqlalchemy import create_engine
|
||||
sync_engine = create_engine("oracle+oracledb://scott:tiger@localhost/?service_name=XEPDB1")
|
||||
|
||||
* calling :func:`_asyncio.create_async_engine` with
|
||||
``oracle+oracledb://...`` will automatically select the async version,
|
||||
e.g.::
|
||||
|
||||
from sqlalchemy.ext.asyncio import create_async_engine
|
||||
asyncio_engine = create_async_engine("oracle+oracledb://scott:tiger@localhost/?service_name=XEPDB1")
|
||||
|
||||
The asyncio version of the dialect may also be specified explicitly using the
|
||||
``oracledb_async`` suffix, as::
|
||||
|
||||
from sqlalchemy.ext.asyncio import create_async_engine
|
||||
asyncio_engine = create_async_engine("oracle+oracledb_async://scott:tiger@localhost/?service_name=XEPDB1")
|
||||
|
||||
.. versionadded:: 2.0.25 added support for the async version of oracledb.
|
||||
|
||||
Thick mode support
|
||||
------------------
|
||||
|
||||
By default the ``python-oracledb`` driver runs in thin mode, which does not
require Oracle Client libraries to be installed on the system. The
``python-oracledb`` driver also supports a "thick" mode, which behaves
similarly to ``cx_oracle`` and requires that the Oracle Client Interface (OCI)
is installed.
||||
|
||||
To enable this mode, the user may call ``oracledb.init_oracle_client``
|
||||
manually, or by passing the parameter ``thick_mode=True`` to
|
||||
:func:`_sa.create_engine`. To pass custom arguments to ``init_oracle_client``,
|
||||
like the ``lib_dir`` path, a dict may be passed to this parameter, as in::
|
||||
|
||||
engine = sa.create_engine("oracle+oracledb://...", thick_mode={
|
||||
"lib_dir": "/path/to/oracle/client/lib", "driver_name": "my-app"
|
||||
})
|
||||
|
||||
.. seealso::
|
||||
|
||||
https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.init_oracle_client
|
||||
|
||||
|
||||
.. versionadded:: 2.0.0 added support for oracledb driver.
|
||||
|
||||
""" # noqa
|
||||
from __future__ import annotations
|
||||
|
||||
import collections
|
||||
import re
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .cx_oracle import OracleDialect_cx_oracle as _OracleDialect_cx_oracle
|
||||
from ... import exc
|
||||
from ... import pool
|
||||
from ...connectors.asyncio import AsyncAdapt_dbapi_connection
|
||||
from ...connectors.asyncio import AsyncAdapt_dbapi_cursor
|
||||
from ...connectors.asyncio import AsyncAdaptFallback_dbapi_connection
|
||||
from ...util import asbool
|
||||
from ...util import await_fallback
|
||||
from ...util import await_only
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from oracledb import AsyncConnection
|
||||
from oracledb import AsyncCursor
|
||||
|
||||
|
||||
class OracleDialect_oracledb(_OracleDialect_cx_oracle):
    """Synchronous Oracle dialect for the python-oracledb driver.

    Inherits nearly all behavior from the cx_Oracle dialect, as
    python-oracledb is Oracle's drop-in successor to cx_Oracle.
    """

    supports_statement_cache = True
    driver = "oracledb"
    # minimum python-oracledb version accepted by the sync dialect
    _min_version = (1,)

    def __init__(
        self,
        auto_convert_lobs=True,
        coerce_to_decimal=True,
        arraysize=None,
        encoding_errors=None,
        thick_mode=None,
        **kwargs,
    ):
        """Construct the dialect.

        :param thick_mode: when truthy, initialize the driver in "thick"
         mode via ``oracledb.init_oracle_client()``; a dict value (even an
         empty one) also enables thick mode and is passed through as
         keyword arguments to ``init_oracle_client``.

        Remaining parameters are forwarded to the cx_Oracle base dialect.
        """
        super().__init__(
            auto_convert_lobs,
            coerce_to_decimal,
            arraysize,
            encoding_errors,
            **kwargs,
        )

        # enable thick mode only when a DBAPI module is actually present;
        # isinstance check lets an empty dict mean "thick mode, defaults"
        if self.dbapi is not None and (
            thick_mode or isinstance(thick_mode, dict)
        ):
            kw = thick_mode if isinstance(thick_mode, dict) else {}
            self.dbapi.init_oracle_client(**kw)

    @classmethod
    def import_dbapi(cls):
        # return the raw oracledb module as the DBAPI
        import oracledb

        return oracledb

    @classmethod
    def is_thin_mode(cls, connection):
        # the driver-level connection object reports its own mode
        return connection.connection.dbapi_connection.thin

    @classmethod
    def get_async_dialect_cls(cls, url):
        # used by create_async_engine() to swap in the asyncio variant
        return OracleDialectAsync_oracledb

    def _load_version(self, dbapi_module):
        """Parse the driver's version string into a tuple and enforce the
        dialect's minimum supported driver version.

        :raises exc.InvalidRequestError: if a version was parsed and it is
         below ``_min_version``.
        """
        version = (0, 0, 0)
        if dbapi_module is not None:
            m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", dbapi_module.version)
            if m:
                # patch component is optional; a missing patch yields a
                # 2-tuple, which still compares correctly below
                version = tuple(
                    int(x) for x in m.group(1, 2, 3) if x is not None
                )
        self.oracledb_ver = version
        # (0, 0, 0) means "version unknown"; skip the minimum check then
        if (
            self.oracledb_ver > (0, 0, 0)
            and self.oracledb_ver < self._min_version
        ):
            raise exc.InvalidRequestError(
                f"oracledb version {self._min_version} and above are supported"
            )
|
||||
|
||||
|
||||
class AsyncAdapt_oracledb_cursor(AsyncAdapt_dbapi_cursor):
    """Adapts oracledb's asyncio ``AsyncCursor`` to the synchronous
    pep-249 cursor interface used by the rest of the dialect."""

    _cursor: AsyncCursor
    __slots__ = ()

    @property
    def outputtypehandler(self):
        # delegate straight to the driver cursor
        return self._cursor.outputtypehandler

    @outputtypehandler.setter
    def outputtypehandler(self, value):
        self._cursor.outputtypehandler = value

    def var(self, *args, **kwargs):
        return self._cursor.var(*args, **kwargs)

    def close(self):
        # discard any pre-buffered rows before closing the driver cursor
        self._rows.clear()
        self._cursor.close()

    def setinputsizes(self, *args: Any, **kwargs: Any) -> Any:
        return self._cursor.setinputsizes(*args, **kwargs)

    def _aenter_cursor(self, cursor: AsyncCursor) -> AsyncCursor:
        try:
            return cursor.__enter__()
        except Exception as error:
            # route driver errors through the connection's error handler
            self._adapt_connection._handle_exception(error)

    async def _execute_async(self, operation, parameters):
        # override to not use mutex, oracledb already has mutex

        if parameters is None:
            result = await self._cursor.execute(operation)
        else:
            result = await self._cursor.execute(operation, parameters)

        # eagerly buffer the full result for client-side cursors so the
        # synchronous fetch methods need no further awaits
        if self._cursor.description and not self.server_side:
            self._rows = collections.deque(await self._cursor.fetchall())
        return result

    async def _executemany_async(
        self,
        operation,
        seq_of_parameters,
    ):
        # override to not use mutex, oracledb already has mutex
        return await self._cursor.executemany(operation, seq_of_parameters)

    def __enter__(self):
        return self

    def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
        self.close()
|
||||
|
||||
|
||||
class AsyncAdapt_oracledb_connection(AsyncAdapt_dbapi_connection):
    """Adapts oracledb's asyncio ``AsyncConnection`` to the synchronous
    pep-249 connection interface used by the rest of the dialect."""

    _connection: AsyncConnection
    __slots__ = ()

    # asyncio connections always report thin mode; thick mode is not
    # supported for asyncio (read by OracleDialect_oracledb.is_thin_mode)
    thin = True

    _cursor_cls = AsyncAdapt_oracledb_cursor
    # no server-side cursor adapter is provided
    _ss_cursor_cls = None

    @property
    def autocommit(self):
        return self._connection.autocommit

    @autocommit.setter
    def autocommit(self, value):
        self._connection.autocommit = value

    @property
    def outputtypehandler(self):
        return self._connection.outputtypehandler

    @outputtypehandler.setter
    def outputtypehandler(self, value):
        self._connection.outputtypehandler = value

    @property
    def version(self):
        return self._connection.version

    @property
    def stmtcachesize(self):
        return self._connection.stmtcachesize

    @stmtcachesize.setter
    def stmtcachesize(self, value):
        self._connection.stmtcachesize = value

    def cursor(self):
        # wrap the driver's async cursor in the sync adapter
        return AsyncAdapt_oracledb_cursor(self)
|
||||
|
||||
|
||||
class AsyncAdaptFallback_oracledb_connection(
    AsyncAdaptFallback_dbapi_connection, AsyncAdapt_oracledb_connection
):
    """Connection adapter used when ``async_fallback`` mode is selected
    (see ``OracledbAdaptDBAPI.connect``)."""

    __slots__ = ()
||||
class OracledbAdaptDBAPI:
    """Wrap the ``oracledb`` module so ``connect()`` produces adapted
    asyncio connections while all other module attributes pass through
    unchanged."""

    def __init__(self, oracledb) -> None:
        self.oracledb = oracledb

        # mirror the module's namespace, except connect which we replace
        for name, value in self.oracledb.__dict__.items():
            if name != "connect":
                self.__dict__[name] = value

    def connect(self, *arg, **kw):
        """Create an adapted connection.

        Consumes the ``async_fallback`` flag and an optional
        ``async_creator_fn`` override from the keyword arguments before
        delegating to the driver's async connect function.
        """
        async_fallback = kw.pop("async_fallback", False)
        creator_fn = kw.pop("async_creator_fn", self.oracledb.connect_async)

        if asbool(async_fallback):
            return AsyncAdaptFallback_oracledb_connection(
                self, await_fallback(creator_fn(*arg, **kw))
            )

        return AsyncAdapt_oracledb_connection(
            self, await_only(creator_fn(*arg, **kw))
        )
||||
class OracleDialectAsync_oracledb(OracleDialect_oracledb):
    """Asyncio variant of the oracledb dialect."""

    is_async = True
    supports_statement_cache = True

    _min_version = (2,)

    # thick_mode mode is not supported by asyncio, oracledb will raise
    @classmethod
    def import_dbapi(cls):
        import oracledb

        return OracledbAdaptDBAPI(oracledb)

    @classmethod
    def get_pool_class(cls, url):
        # select the fallback pool when the URL asks for async_fallback
        if asbool(url.query.get("async_fallback", False)):
            return pool.FallbackAsyncAdaptedQueuePool
        return pool.AsyncAdaptedQueuePool

    def get_driver_connection(self, connection):
        # unwrap the adapter and hand back the raw driver connection
        return connection._connection
||||
# sync and asyncio dialect entry points consulted by the registry
dialect = OracleDialect_oracledb
dialect_async = OracleDialectAsync_oracledb
|
@ -0,0 +1,220 @@
|
|||
# dialects/oracle/provision.py
|
||||
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
|
||||
# <see AUTHORS file>
|
||||
#
|
||||
# This module is part of SQLAlchemy and is released under
|
||||
# the MIT License: https://www.opensource.org/licenses/mit-license.php
|
||||
# mypy: ignore-errors
|
||||
|
||||
from ... import create_engine
|
||||
from ... import exc
|
||||
from ... import inspect
|
||||
from ...engine import url as sa_url
|
||||
from ...testing.provision import configure_follower
|
||||
from ...testing.provision import create_db
|
||||
from ...testing.provision import drop_all_schema_objects_post_tables
|
||||
from ...testing.provision import drop_all_schema_objects_pre_tables
|
||||
from ...testing.provision import drop_db
|
||||
from ...testing.provision import follower_url_from_main
|
||||
from ...testing.provision import log
|
||||
from ...testing.provision import post_configure_engine
|
||||
from ...testing.provision import run_reap_dbs
|
||||
from ...testing.provision import set_default_schema_on_connection
|
||||
from ...testing.provision import stop_test_class_outside_fixtures
|
||||
from ...testing.provision import temp_table_keyword_args
|
||||
from ...testing.provision import update_db_opts
|
||||
|
||||
|
||||
@create_db.for_db("oracle")
def _oracle_create_db(cfg, eng, ident):
    """Create the main test user plus its two side schemas and grant
    the privileges the test suite needs."""
    # NOTE: make sure you've run "ALTER DATABASE default tablespace users" or
    # similar, so that the default tablespace is not "system"; reflection will
    # fail otherwise
    users = [ident, "%s_ts1" % ident, "%s_ts2" % ident]
    with eng.begin() as conn:
        for user in users:
            conn.exec_driver_sql("create user %s identified by xe" % user)
        conn.exec_driver_sql("grant dba to %s" % ident)
        for user in users:
            conn.exec_driver_sql("grant unlimited tablespace to %s" % user)
        # these are needed to create materialized views
        for user in users:
            conn.exec_driver_sql("grant create table to %s" % user)
||||
@configure_follower.for_db("oracle")
def _oracle_configure_follower(config, ident):
    """Point the follower's test schemas at the per-ident side users."""
    config.test_schema = f"{ident}_ts1"
    config.test_schema_2 = f"{ident}_ts2"
||||
def _ora_drop_ignore(conn, dbname):
    """Drop user *dbname* with cascade; log and return False instead of
    raising when the database refuses."""
    try:
        conn.exec_driver_sql("drop user %s cascade" % dbname)
    except exc.DatabaseError as err:
        log.warning("couldn't drop db: %s", err)
        return False
    else:
        log.info("Reaped db: %s", dbname)
        return True
||||
@drop_all_schema_objects_pre_tables.for_db("oracle")
def _ora_drop_all_schema_objects_pre_tables(cfg, eng):
    # purge the recyclebin for both the default user and the test
    # schema before the schema-object drops begin
    _purge_recyclebin(eng)
    _purge_recyclebin(eng, cfg.test_schema)
||||
@drop_all_schema_objects_post_tables.for_db("oracle")
def _ora_drop_all_schema_objects_post_tables(cfg, eng):
    """Drop leftover synonyms (default schema, then test schema) and any
    remaining temp tables after the table drops."""
    with eng.begin() as conn:
        for schema in (None, cfg.test_schema):
            prefix = "" if schema is None else f"{cfg.test_schema}."
            for syn in conn.dialect._get_synonyms(conn, schema, None, None):
                conn.exec_driver_sql(
                    f"drop synonym {prefix}{syn['synonym_name']}"
                )

        for tmp_table in inspect(conn).get_temp_table_names():
            conn.exec_driver_sql(f"drop table {tmp_table}")
||||
@drop_db.for_db("oracle")
def _oracle_drop_db(cfg, eng, ident):
    """Best-effort drop of the test user and its two side schemas."""
    with eng.begin() as conn:
        # cx_Oracle seems to occasionally leak open connections when a large
        # suite it run, even if we confirm we have zero references to
        # connection objects.
        # while there is a "kill session" command in Oracle,
        # it unfortunately does not release the connection sufficiently.
        for user in (ident, f"{ident}_ts1", f"{ident}_ts2"):
            _ora_drop_ignore(conn, user)
||||
@stop_test_class_outside_fixtures.for_db("oracle")
def _ora_stop_test_class_outside_fixtures(config, db, cls):
    """Per-test-class Oracle cleanup: purge the recyclebin and flush the
    statement cache on every DBAPI connection used during the class."""
    try:
        _purge_recyclebin(db)
    except exc.DatabaseError as err:
        # best effort; lack of privileges etc. should not fail the run
        log.warning("purge recyclebin command failed: %s", err)

    # clear statement cache on all connections that were used
    # https://github.com/oracle/python-cx_Oracle/issues/519

    for cx_oracle_conn in _all_conns:
        try:
            sc = cx_oracle_conn.stmtcachesize
        except db.dialect.dbapi.InterfaceError:
            # connection closed
            pass
        else:
            # dropping the size to 0 discards all cached statements;
            # restoring it re-enables caching at the original size
            cx_oracle_conn.stmtcachesize = 0
            cx_oracle_conn.stmtcachesize = sc
    _all_conns.clear()
||||
def _purge_recyclebin(eng, schema=None):
    """Empty Oracle's recyclebin: the current user's bin when *schema* is
    None, otherwise dropped TABLE objects owned by *schema*."""
    with eng.begin() as conn:
        if schema is None:
            # run magic command to get rid of identity sequences
            # https://floo.bar/2019/11/29/drop-the-underlying-sequence-of-an-identity-column/ # noqa: E501
            conn.exec_driver_sql("purge recyclebin")
        else:
            # per user: https://community.oracle.com/tech/developers/discussion/2255402/how-to-clear-dba-recyclebin-for-a-particular-user # noqa: E501
            for owner, object_name, type_ in conn.exec_driver_sql(
                "select owner, object_name,type from "
                "dba_recyclebin where owner=:schema and type='TABLE'",
                {"schema": conn.dialect.denormalize_name(schema)},
            ).all():
                conn.exec_driver_sql(f'purge {type_} {owner}."{object_name}"')
||||
# raw DBAPI connections observed via the pool "checkout" event;
# consumed by _ora_stop_test_class_outside_fixtures for cache cleanup
_all_conns = set()
||||
@post_configure_engine.for_db("oracle")
def _oracle_post_configure_engine(url, engine, follower_ident):
    """Attach pool events that track checked-out connections and
    invalidate connections that had two-phase commit state."""
    from sqlalchemy import event

    @event.listens_for(engine, "checkout")
    def checkout(dbapi_con, con_record, con_proxy):
        # record every raw connection handed out so the statement cache
        # can be flushed later (see _ora_stop_test_class_outside_fixtures)
        _all_conns.add(dbapi_con)

    @event.listens_for(engine, "checkin")
    def checkin(dbapi_connection, connection_record):
        # work around cx_Oracle issue:
        # https://github.com/oracle/python-cx_Oracle/issues/530
        # invalidate oracle connections that had 2pc set up
        if "cx_oracle_xid" in connection_record.info:
            connection_record.invalidate()
||||
@run_reap_dbs.for_db("oracle")
def _reap_oracle_dbs(url, idents):
    """Drop stale TEST_% users that belong to *idents* and currently
    have no active session, including their _ts1/_ts2 side schemas."""
    log.info("db reaper connecting to %r", url)
    eng = create_engine(url)
    with eng.begin() as conn:
        log.info("identifiers in file: %s", ", ".join(idents))

        # TEST_% users with no live v$session entry are candidates
        to_reap = conn.exec_driver_sql(
            "select u.username from all_users u where username "
            "like 'TEST_%' and not exists (select username "
            "from v$session where username=u.username)"
        )
        all_names = {row[0].lower() for row in to_reap}
        to_drop = set()
        for name in all_names:
            # side schemas are only dropped along with their owner
            if name.endswith(("_ts1", "_ts2")):
                continue
            if name in idents:
                to_drop.add(name)
                for suffix in ("_ts1", "_ts2"):
                    side = name + suffix
                    if side in all_names:
                        to_drop.add(side)

        dropped = total = 0
        for total, username in enumerate(to_drop, 1):
            if _ora_drop_ignore(conn, username):
                dropped += 1
        log.info(
            "Dropped %d out of %d stale databases detected", dropped, total
        )
||||
@follower_url_from_main.for_db("oracle")
def _oracle_follower_url_from_main(url, ident):
    """Derive a follower URL: same target, per-ident user, password xe."""
    return sa_url.make_url(url).set(username=ident, password="xe")
||||
@temp_table_keyword_args.for_db("oracle")
def _oracle_temp_table_keyword_args(cfg, eng):
    """Table() arguments making a test temp table a GLOBAL TEMPORARY
    table whose rows survive commit."""
    return {
        "prefixes": ["GLOBAL TEMPORARY"],
        "oracle_on_commit": "PRESERVE ROWS",
    }
||||
@set_default_schema_on_connection.for_db("oracle")
def _oracle_set_default_schema_on_connection(
    cfg, dbapi_connection, schema_name
):
    """Switch the session's default schema to *schema_name*.

    Fix: the cursor previously leaked if ``execute`` raised; it is now
    closed in a ``finally`` block.  *schema_name* is interpolated into
    the DDL string as before — acceptable only because it comes from
    the test harness, never from untrusted input.
    """
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("ALTER SESSION SET CURRENT_SCHEMA=%s" % schema_name)
    finally:
        cursor.close()
||||
@update_db_opts.for_db("oracle")
def _update_db_opts(db_url, db_opts, options):
    """Set database options (db_opts) for a test database that we created."""
    # thick_mode is only meaningful for the oracledb driver
    uses_oracledb = sa_url.make_url(db_url).get_driver_name() == "oracledb"
    if options.oracledb_thick_mode and uses_oracledb:
        db_opts["thick_mode"] = True
|
@ -0,0 +1,287 @@
|
|||
# dialects/oracle/types.py
|
||||
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
|
||||
# <see AUTHORS file>
|
||||
#
|
||||
# This module is part of SQLAlchemy and is released under
|
||||
# the MIT License: https://www.opensource.org/licenses/mit-license.php
|
||||
# mypy: ignore-errors
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime as dt
|
||||
from typing import Optional
|
||||
from typing import Type
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from ... import exc
|
||||
from ...sql import sqltypes
|
||||
from ...types import NVARCHAR
|
||||
from ...types import VARCHAR
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ...engine.interfaces import Dialect
|
||||
from ...sql.type_api import _LiteralProcessorType
|
||||
|
||||
|
||||
class RAW(sqltypes._Binary):
    """Oracle RAW binary type."""

    __visit_name__ = "RAW"


# legacy alias for RAW
OracleRaw = RAW
|
||||
class NCLOB(sqltypes.Text):
    """Oracle NCLOB (national character large object) type."""

    __visit_name__ = "NCLOB"


class VARCHAR2(VARCHAR):
    """VARCHAR variant rendered as VARCHAR2 in DDL."""

    __visit_name__ = "VARCHAR2"


# NVARCHAR2 is handled by the generic NVARCHAR type
NVARCHAR2 = NVARCHAR
|
||||
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
    """Oracle NUMBER type; behaves as Numeric or Integer depending on
    whether a positive scale is present."""

    __visit_name__ = "NUMBER"

    def __init__(self, precision=None, scale=None, asdecimal=None):
        # default to Decimal results only when a positive scale
        # indicates a fractional component
        if asdecimal is None:
            asdecimal = bool(scale and scale > 0)

        super().__init__(precision=precision, scale=scale, asdecimal=asdecimal)

    def adapt(self, impltype):
        ret = super().adapt(impltype)
        # leave a hint for the DBAPI handler
        ret._is_oracle_number = True
        return ret

    @property
    def _type_affinity(self):
        # positive scale -> Numeric affinity; otherwise Integer
        if bool(self.scale and self.scale > 0):
            return sqltypes.Numeric
        else:
            return sqltypes.Integer
|
||||
class FLOAT(sqltypes.FLOAT):
    """Oracle FLOAT.

    This is the same as :class:`_sqltypes.FLOAT` except that
    an Oracle-specific :paramref:`_oracle.FLOAT.binary_precision`
    parameter is accepted, and
    the :paramref:`_sqltypes.Float.precision` parameter is not accepted.

    Oracle FLOAT types indicate precision in terms of "binary precision", which
    defaults to 126. For a REAL type, the value is 63. This parameter does not
    cleanly map to a specific number of decimal places but is roughly
    equivalent to the desired number of decimal places divided by 0.3103.

    .. versionadded:: 2.0

    """

    __visit_name__ = "FLOAT"

    def __init__(
        self,
        binary_precision=None,
        asdecimal=False,
        decimal_return_scale=None,
    ):
        r"""
        Construct a FLOAT

        :param binary_precision: Oracle binary precision value to be rendered
        in DDL. This may be approximated to the number of decimal characters
        using the formula "decimal precision = 0.30103 * binary precision".
        The default value used by Oracle for FLOAT / DOUBLE PRECISION is 126.

        :param asdecimal: See :paramref:`_sqltypes.Float.asdecimal`

        :param decimal_return_scale: See
        :paramref:`_sqltypes.Float.decimal_return_scale`

        """
        super().__init__(
            asdecimal=asdecimal, decimal_return_scale=decimal_return_scale
        )
        # rendered in DDL by the type compiler; None omits the precision
        # so Oracle's default applies
        self.binary_precision = binary_precision
|
||||
class BINARY_DOUBLE(sqltypes.Double):
    """Oracle BINARY_DOUBLE floating-point type."""

    __visit_name__ = "BINARY_DOUBLE"


class BINARY_FLOAT(sqltypes.Float):
    """Oracle BINARY_FLOAT floating-point type."""

    __visit_name__ = "BINARY_FLOAT"


class BFILE(sqltypes.LargeBinary):
    """Oracle BFILE external binary file type."""

    __visit_name__ = "BFILE"


class LONG(sqltypes.Text):
    """Oracle legacy LONG text type."""

    __visit_name__ = "LONG"
|
||||
class _OracleDateLiteralRender:
    """Mixin producing literal processors that render Python date and
    datetime values as Oracle TO_DATE / TO_TIMESTAMP expressions."""

    def _literal_processor_datetime(self, dialect):
        def process(value):
            rendered = value.isoformat().replace("T", " ")
            # only emit a TO_TIMESTAMP when fractional seconds exist
            if getattr(value, "microsecond", None):
                return (
                    f"TO_TIMESTAMP('{rendered}', "
                    "'YYYY-MM-DD HH24:MI:SS.FF')"
                )
            return f"TO_DATE('{rendered}', 'YYYY-MM-DD HH24:MI:SS')"

        return process

    def _literal_processor_date(self, dialect):
        def process(value):
            rendered = value.isoformat().split("T")[0]
            if getattr(value, "microsecond", None):
                return f"TO_TIMESTAMP('{rendered}', 'YYYY-MM-DD')"
            return f"TO_DATE('{rendered}', 'YYYY-MM-DD')"

        return process
|
||||
class DATE(_OracleDateLiteralRender, sqltypes.DateTime):
    """Provide the oracle DATE type.

    This type has no special Python behavior, except that it subclasses
    :class:`_types.DateTime`; this is to suit the fact that the Oracle
    ``DATE`` type supports a time value.

    """

    __visit_name__ = "DATE"

    def literal_processor(self, dialect):
        # render inline literals via TO_DATE / TO_TIMESTAMP
        return self._literal_processor_datetime(dialect)

    def _compare_type_affinity(self, other):
        # Oracle DATE compares against both generic DateTime and Date
        return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
|
||||
class _OracleDate(_OracleDateLiteralRender, sqltypes.Date):
    """Internal date-only variant; renders date-form literals."""

    def literal_processor(self, dialect):
        return self._literal_processor_date(dialect)
|
||||
class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
    """Oracle INTERVAL DAY TO SECOND type."""

    __visit_name__ = "INTERVAL"

    def __init__(self, day_precision=None, second_precision=None):
        """Construct an INTERVAL.

        Note that only DAY TO SECOND intervals are currently supported.
        This is due to a lack of support for YEAR TO MONTH intervals
        within available DBAPIs.

        :param day_precision: the day precision value. this is the number of
        digits to store for the day field. Defaults to "2"
        :param second_precision: the second precision value. this is the
        number of digits to store for the fractional seconds field.
        Defaults to "6".

        """
        self.day_precision = day_precision
        self.second_precision = second_precision

    @classmethod
    def _adapt_from_generic_interval(cls, interval):
        # build the Oracle type from a generic Interval's precisions
        return INTERVAL(
            day_precision=interval.day_precision,
            second_precision=interval.second_precision,
        )

    @classmethod
    def adapt_emulated_to_native(
        cls, interval: sqltypes.Interval, **kw  # type: ignore[override]
    ):
        # same conversion as _adapt_from_generic_interval; kept separate
        # to satisfy the NativeForEmulated interface
        return INTERVAL(
            day_precision=interval.day_precision,
            second_precision=interval.second_precision,
        )

    @property
    def _type_affinity(self):
        return sqltypes.Interval

    def as_generic(self, allow_nulltype=False):
        # reverse conversion: produce the generic, native Interval
        return sqltypes.Interval(
            native=True,
            second_precision=self.second_precision,
            day_precision=self.day_precision,
        )

    @property
    def python_type(self) -> Type[dt.timedelta]:
        return dt.timedelta

    def literal_processor(
        self, dialect: Dialect
    ) -> Optional[_LiteralProcessorType[dt.timedelta]]:
        def process(value: dt.timedelta) -> str:
            # render a timedelta as a seconds-based interval expression
            return f"NUMTODSINTERVAL({value.total_seconds()}, 'SECOND')"

        return process
|
||||
class TIMESTAMP(sqltypes.TIMESTAMP):
    """Oracle implementation of ``TIMESTAMP``, which supports additional
    Oracle-specific modes

    .. versionadded:: 2.0

    """

    def __init__(self, timezone: bool = False, local_timezone: bool = False):
        """Construct a new :class:`_oracle.TIMESTAMP`.

        :param timezone: boolean.  Indicates that the TIMESTAMP type should
         use Oracle's ``TIMESTAMP WITH TIME ZONE`` datatype.

        :param local_timezone: boolean.  Indicates that the TIMESTAMP type
         should use Oracle's ``TIMESTAMP WITH LOCAL TIME ZONE`` datatype.

        :raises ArgumentError: if both ``timezone`` and ``local_timezone``
         are set.

        """
        # the two WITH ... TIME ZONE renderings are mutually exclusive
        if timezone and local_timezone:
            raise exc.ArgumentError(
                "timezone and local_timezone are mutually exclusive"
            )
        super().__init__(timezone=timezone)
        self.local_timezone = local_timezone
||||
class ROWID(sqltypes.TypeEngine):
    """Oracle ROWID type.

    When used in a cast() or similar, generates ROWID.

    """

    __visit_name__ = "ROWID"
|
||||
class _OracleBoolean(sqltypes.Boolean):
    # booleans are bound as NUMBER values on Oracle
    def get_dbapi_type(self, dbapi):
        return dbapi.NUMBER
|
Loading…
Add table
Add a link
Reference in a new issue