gooderp18绿色标准版
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

732 lines
27KB

  1. # -*- coding: utf-8 -*-
  2. # Part of Odoo. See LICENSE file for full copyright and licensing details.
  3. # pylint: disable=sql-injection
  4. from __future__ import annotations
  5. import enum
  6. import json
  7. import logging
  8. import re
  9. from binascii import crc32
  10. from collections import defaultdict
  11. from typing import TYPE_CHECKING
  12. if TYPE_CHECKING:
  13. from odoo.fields import Field
  14. from collections.abc import Iterable
  15. import psycopg2
  16. import psycopg2.sql as pgsql
  17. from .misc import named_to_positional_printf
# Public API of this module.
__all__ = [
    "SQL",
    "create_index",
    "create_unique_index",
    "drop_view_if_exists",
    "escape_psql",
    "index_exists",
    "make_identifier",
    "make_index_name",
    "reverse_order",
]

# Logger dedicated to database schema changes (table/column/index DDL).
_schema = logging.getLogger('odoo.schema')

# Identifiers accepted by SQL.identifier() besides Python identifiers:
# must start with a letter, digit or underscore, and may contain '$' and '-'.
IDENT_RE = re.compile(r'^[a-z0-9_][a-z0-9_$\-]*$', re.I)

# Map ON DELETE action names to the single-letter codes stored in
# pg_constraint.confdeltype.
_CONFDELTYPES = {
    'RESTRICT': 'r',
    'NO ACTION': 'a',
    'CASCADE': 'c',
    'SET NULL': 'n',
    'SET DEFAULT': 'd',
}
  38. class SQL:
  39. """ An object that wraps SQL code with its parameters, like::
  40. sql = SQL("UPDATE TABLE foo SET a = %s, b = %s", 'hello', 42)
  41. cr.execute(sql)
  42. The code is given as a ``%``-format string, and supports either positional
  43. arguments (with `%s`) or named arguments (with `%(name)s`). Escaped
  44. characters (like ``"%%"``) are not supported, though. The arguments are
  45. meant to be merged into the code using the `%` formatting operator.
  46. The SQL wrapper is designed to be composable: the arguments can be either
  47. actual parameters, or SQL objects themselves::
  48. sql = SQL(
  49. "UPDATE TABLE %s SET %s",
  50. SQL.identifier(tablename),
  51. SQL("%s = %s", SQL.identifier(columnname), value),
  52. )
  53. The combined SQL code is given by ``sql.code``, while the corresponding
  54. combined parameters are given by the list ``sql.params``. This allows to
  55. combine any number of SQL terms without having to separately combine their
  56. parameters, which can be tedious, bug-prone, and is the main downside of
  57. `psycopg2.sql <https://www.psycopg.org/docs/sql.html>`.
  58. The second purpose of the wrapper is to discourage SQL injections. Indeed,
  59. if ``code`` is a string literal (not a dynamic string), then the SQL object
  60. made with ``code`` is guaranteed to be safe, provided the SQL objects
  61. within its parameters are themselves safe.
  62. The wrapper may also contain some metadata ``to_flush``. If not ``None``,
  63. its value is a field which the SQL code depends on. The metadata of a
  64. wrapper and its parts can be accessed by the iterator ``sql.to_flush``.
  65. """
  66. __slots__ = ('__code', '__params', '__to_flush')
  67. __code: str
  68. __params: tuple
  69. __to_flush: tuple
  70. # pylint: disable=keyword-arg-before-vararg
  71. def __init__(self, code: (str | SQL) = "", /, *args, to_flush: (Field | None) = None, **kwargs):
  72. if isinstance(code, SQL):
  73. if args or kwargs or to_flush:
  74. raise TypeError("SQL() unexpected arguments when code has type SQL")
  75. self.__code = code.__code
  76. self.__params = code.__params
  77. self.__to_flush = code.__to_flush
  78. return
  79. # validate the format of code and parameters
  80. if args and kwargs:
  81. raise TypeError("SQL() takes either positional arguments, or named arguments")
  82. if kwargs:
  83. code, args = named_to_positional_printf(code, kwargs)
  84. elif not args:
  85. code % () # check that code does not contain %s
  86. self.__code = code
  87. self.__params = ()
  88. self.__to_flush = () if to_flush is None else (to_flush,)
  89. return
  90. code_list = []
  91. params_list = []
  92. to_flush_list = []
  93. for arg in args:
  94. if isinstance(arg, SQL):
  95. code_list.append(arg.__code)
  96. params_list.extend(arg.__params)
  97. to_flush_list.extend(arg.__to_flush)
  98. else:
  99. code_list.append("%s")
  100. params_list.append(arg)
  101. if to_flush is not None:
  102. to_flush_list.append(to_flush)
  103. self.__code = code % tuple(code_list)
  104. self.__params = tuple(params_list)
  105. self.__to_flush = tuple(to_flush_list)
  106. @property
  107. def code(self) -> str:
  108. """ Return the combined SQL code string. """
  109. return self.__code
  110. @property
  111. def params(self) -> list:
  112. """ Return the combined SQL code params as a list of values. """
  113. return list(self.__params)
  114. @property
  115. def to_flush(self) -> Iterable[Field]:
  116. """ Return an iterator on the fields to flush in the metadata of
  117. ``self`` and all of its parts.
  118. """
  119. return self.__to_flush
  120. def __repr__(self):
  121. return f"SQL({', '.join(map(repr, [self.__code, *self.__params]))})"
  122. def __bool__(self):
  123. return bool(self.__code)
  124. def __eq__(self, other):
  125. return isinstance(other, SQL) and self.__code == other.__code and self.__params == other.__params
  126. def __iter__(self):
  127. """ Yields ``self.code`` and ``self.params``. This was introduced for
  128. backward compatibility, as it enables to access the SQL and parameters
  129. by deconstructing the object::
  130. sql = SQL(...)
  131. code, params = sql
  132. """
  133. yield self.code
  134. yield self.params
  135. def join(self, args: Iterable) -> SQL:
  136. """ Join SQL objects or parameters with ``self`` as a separator. """
  137. args = list(args)
  138. # optimizations for special cases
  139. if len(args) == 0:
  140. return SQL()
  141. if len(args) == 1 and isinstance(args[0], SQL):
  142. return args[0]
  143. if not self.__params:
  144. return SQL(self.__code.join("%s" for arg in args), *args)
  145. # general case: alternate args with self
  146. items = [self] * (len(args) * 2 - 1)
  147. for index, arg in enumerate(args):
  148. items[index * 2] = arg
  149. return SQL("%s" * len(items), *items)
  150. @classmethod
  151. def identifier(cls, name: str, subname: (str | None) = None, to_flush: (Field | None) = None) -> SQL:
  152. """ Return an SQL object that represents an identifier. """
  153. assert name.isidentifier() or IDENT_RE.match(name), f"{name!r} invalid for SQL.identifier()"
  154. if subname is None:
  155. return cls(f'"{name}"', to_flush=to_flush)
  156. assert subname.isidentifier() or IDENT_RE.match(subname), f"{subname!r} invalid for SQL.identifier()"
  157. return cls(f'"{name}"."{subname}"', to_flush=to_flush)
  158. def existing_tables(cr, tablenames):
  159. """ Return the names of existing tables among ``tablenames``. """
  160. cr.execute(SQL("""
  161. SELECT c.relname
  162. FROM pg_class c
  163. JOIN pg_namespace n ON (n.oid = c.relnamespace)
  164. WHERE c.relname IN %s
  165. AND c.relkind IN ('r', 'v', 'm')
  166. AND n.nspname = current_schema
  167. """, tuple(tablenames)))
  168. return [row[0] for row in cr.fetchall()]
  169. def table_exists(cr, tablename):
  170. """ Return whether the given table exists. """
  171. return len(existing_tables(cr, {tablename})) == 1
class TableKind(enum.Enum):
    """ Kind of a database relation, as reported by :func:`table_kind`. """
    # Values mirror pg_class.relkind codes, except Temporary (relkind 'r'
    # with relpersistence 't') and Other (any unrecognized relkind).
    Regular = 'r'
    Temporary = 't'
    View = 'v'
    Materialized = 'm'
    Foreign = 'f'
    Other = None
  179. def table_kind(cr, tablename: str) -> TableKind | None:
  180. """ Return the kind of a table, if ``tablename`` is a regular or foreign
  181. table, or a view (ignores indexes, sequences, toast tables, and partitioned
  182. tables; unlogged tables are considered regular)
  183. """
  184. cr.execute(SQL("""
  185. SELECT c.relkind, c.relpersistence
  186. FROM pg_class c
  187. JOIN pg_namespace n ON (n.oid = c.relnamespace)
  188. WHERE c.relname = %s
  189. AND n.nspname = current_schema
  190. """, tablename))
  191. if not cr.rowcount:
  192. return None
  193. kind, persistence = cr.fetchone()
  194. # special case: permanent, temporary, and unlogged tables differ by their
  195. # relpersistence, they're all "ordinary" (relkind = r)
  196. if kind == 'r':
  197. return TableKind.Temporary if persistence == 't' else TableKind.Regular
  198. try:
  199. return TableKind(kind)
  200. except ValueError:
  201. # NB: or raise? unclear if it makes sense to allow table_kind to
  202. # "work" with something like an index or sequence
  203. return TableKind.Other
# Prescribed column order by type: columns aligned on 4 bytes, then columns
# aligned on 1 byte, then columns aligned on 8 bytes (values have been chosen
# to minimize padding in rows; unknown column types are put last via the
# defaultdict factory value 16).
SQL_ORDER_BY_TYPE = defaultdict(lambda: 16, {
    'int4': 1,       # 4 bytes aligned on 4 bytes
    'varchar': 2,    # variable aligned on 4 bytes
    'date': 3,       # 4 bytes aligned on 4 bytes
    'jsonb': 4,      # jsonb
    'text': 5,       # variable aligned on 4 bytes
    'numeric': 6,    # variable aligned on 4 bytes
    'bool': 7,       # 1 byte aligned on 1 byte
    'timestamp': 8,  # 8 bytes aligned on 8 bytes
    'float8': 9,     # 8 bytes aligned on 8 bytes
})
  218. def create_model_table(cr, tablename, comment=None, columns=()):
  219. """ Create the table for a model. """
  220. colspecs = [
  221. SQL('id SERIAL NOT NULL'),
  222. *(SQL("%s %s", SQL.identifier(colname), SQL(coltype)) for colname, coltype, _ in columns),
  223. SQL('PRIMARY KEY(id)'),
  224. ]
  225. queries = [
  226. SQL("CREATE TABLE %s (%s)", SQL.identifier(tablename), SQL(", ").join(colspecs)),
  227. ]
  228. if comment:
  229. queries.append(SQL(
  230. "COMMENT ON TABLE %s IS %s",
  231. SQL.identifier(tablename), comment,
  232. ))
  233. for colname, _, colcomment in columns:
  234. queries.append(SQL(
  235. "COMMENT ON COLUMN %s IS %s",
  236. SQL.identifier(tablename, colname), colcomment,
  237. ))
  238. cr.execute(SQL("; ").join(queries))
  239. _schema.debug("Table %r: created", tablename)
  240. def table_columns(cr, tablename):
  241. """ Return a dict mapping column names to their configuration. The latter is
  242. a dict with the data from the table ``information_schema.columns``.
  243. """
  244. # Do not select the field `character_octet_length` from `information_schema.columns`
  245. # because specific access right restriction in the context of shared hosting (Heroku, OVH, ...)
  246. # might prevent a postgres user to read this field.
  247. cr.execute(SQL(
  248. ''' SELECT column_name, udt_name, character_maximum_length, is_nullable
  249. FROM information_schema.columns WHERE table_name=%s ''',
  250. tablename,
  251. ))
  252. return {row['column_name']: row for row in cr.dictfetchall()}
  253. def column_exists(cr, tablename, columnname):
  254. """ Return whether the given column exists. """
  255. cr.execute(SQL(
  256. """ SELECT 1 FROM information_schema.columns
  257. WHERE table_name=%s AND column_name=%s """,
  258. tablename, columnname,
  259. ))
  260. return cr.rowcount
  261. def create_column(cr, tablename, columnname, columntype, comment=None):
  262. """ Create a column with the given type. """
  263. sql = SQL(
  264. "ALTER TABLE %s ADD COLUMN %s %s %s",
  265. SQL.identifier(tablename),
  266. SQL.identifier(columnname),
  267. SQL(columntype),
  268. SQL("DEFAULT false" if columntype.upper() == 'BOOLEAN' else ""),
  269. )
  270. if comment:
  271. sql = SQL("%s; %s", sql, SQL(
  272. "COMMENT ON COLUMN %s IS %s",
  273. SQL.identifier(tablename, columnname), comment,
  274. ))
  275. cr.execute(sql)
  276. _schema.debug("Table %r: added column %r of type %s", tablename, columnname, columntype)
  277. def rename_column(cr, tablename, columnname1, columnname2):
  278. """ Rename the given column. """
  279. cr.execute(SQL(
  280. "ALTER TABLE %s RENAME COLUMN %s TO %s",
  281. SQL.identifier(tablename),
  282. SQL.identifier(columnname1),
  283. SQL.identifier(columnname2),
  284. ))
  285. _schema.debug("Table %r: renamed column %r to %r", tablename, columnname1, columnname2)
  286. def convert_column(cr, tablename, columnname, columntype):
  287. """ Convert the column to the given type. """
  288. using = SQL("%s::%s", SQL.identifier(columnname), SQL(columntype))
  289. _convert_column(cr, tablename, columnname, columntype, using)
  290. def convert_column_translatable(cr, tablename, columnname, columntype):
  291. """ Convert the column from/to a 'jsonb' translated field column. """
  292. drop_index(cr, make_index_name(tablename, columnname), tablename)
  293. if columntype == "jsonb":
  294. using = SQL(
  295. "CASE WHEN %s IS NOT NULL THEN jsonb_build_object('en_US', %s::varchar) END",
  296. SQL.identifier(columnname), SQL.identifier(columnname),
  297. )
  298. else:
  299. using = SQL("%s->>'en_US'", SQL.identifier(columnname))
  300. _convert_column(cr, tablename, columnname, columntype, using)
def _convert_column(cr, tablename, columnname, columntype, using: SQL):
    # Change the column's type in place with the given USING expression,
    # dropping the column default in the same statement.
    query = SQL(
        "ALTER TABLE %s ALTER COLUMN %s DROP DEFAULT, ALTER COLUMN %s TYPE %s USING %s",
        SQL.identifier(tablename), SQL.identifier(columnname),
        SQL.identifier(columnname), SQL(columntype), using,
    )
    try:
        # first attempt inside a savepoint, so a failure leaves the
        # transaction usable
        with cr.savepoint(flush=False):
            cr.execute(query, log_exceptions=False)
    except psycopg2.NotSupportedError:
        # the ALTER is not supported while views depend on the column; drop
        # them and retry (presumably they are recreated later — the caller is
        # expected to handle that)
        drop_depending_views(cr, tablename, columnname)
        cr.execute(query)
    _schema.debug("Table %r: column %r changed to type %s", tablename, columnname, columntype)
  314. def drop_depending_views(cr, table, column):
  315. """drop views depending on a field to allow the ORM to resize it in-place"""
  316. for v, k in get_depending_views(cr, table, column):
  317. cr.execute(SQL(
  318. "DROP %s IF EXISTS %s CASCADE",
  319. SQL("MATERIALIZED VIEW" if k == "m" else "VIEW"),
  320. SQL.identifier(v),
  321. ))
  322. _schema.debug("Drop view %r", v)
  323. def get_depending_views(cr, table, column):
  324. # http://stackoverflow.com/a/11773226/75349
  325. cr.execute(SQL("""
  326. SELECT distinct quote_ident(dependee.relname), dependee.relkind
  327. FROM pg_depend
  328. JOIN pg_rewrite ON pg_depend.objid = pg_rewrite.oid
  329. JOIN pg_class as dependee ON pg_rewrite.ev_class = dependee.oid
  330. JOIN pg_class as dependent ON pg_depend.refobjid = dependent.oid
  331. JOIN pg_attribute ON pg_depend.refobjid = pg_attribute.attrelid
  332. AND pg_depend.refobjsubid = pg_attribute.attnum
  333. WHERE dependent.relname = %s
  334. AND pg_attribute.attnum > 0
  335. AND pg_attribute.attname = %s
  336. AND dependee.relkind in ('v', 'm')
  337. """, table, column))
  338. return cr.fetchall()
  339. def set_not_null(cr, tablename, columnname):
  340. """ Add a NOT NULL constraint on the given column. """
  341. query = SQL(
  342. "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL",
  343. SQL.identifier(tablename), SQL.identifier(columnname),
  344. )
  345. try:
  346. with cr.savepoint(flush=False):
  347. cr.execute(query, log_exceptions=False)
  348. _schema.debug("Table %r: column %r: added constraint NOT NULL", tablename, columnname)
  349. except Exception:
  350. raise Exception("Table %r: unable to set NOT NULL on column %r", tablename, columnname)
  351. def drop_not_null(cr, tablename, columnname):
  352. """ Drop the NOT NULL constraint on the given column. """
  353. cr.execute(SQL(
  354. "ALTER TABLE %s ALTER COLUMN %s DROP NOT NULL",
  355. SQL.identifier(tablename), SQL.identifier(columnname),
  356. ))
  357. _schema.debug("Table %r: column %r: dropped constraint NOT NULL", tablename, columnname)
  358. def constraint_definition(cr, tablename, constraintname):
  359. """ Return the given constraint's definition. """
  360. cr.execute(SQL("""
  361. SELECT COALESCE(d.description, pg_get_constraintdef(c.oid))
  362. FROM pg_constraint c
  363. JOIN pg_class t ON t.oid = c.conrelid
  364. LEFT JOIN pg_description d ON c.oid = d.objoid
  365. WHERE t.relname = %s AND conname = %s
  366. """, tablename, constraintname))
  367. return cr.fetchone()[0] if cr.rowcount else None
  368. def add_constraint(cr, tablename, constraintname, definition):
  369. """ Add a constraint on the given table. """
  370. # There is a fundamental issue with SQL implementation that messes up with queries
  371. # using %, for details check the PR discussion of this patch #188716. To be fixed
  372. # in master. Here we use instead psycopg.sql
  373. query1 = pgsql.SQL("ALTER TABLE {} ADD CONSTRAINT {} {}").format(
  374. pgsql.Identifier(tablename), pgsql.Identifier(constraintname), pgsql.SQL(definition),
  375. )
  376. query2 = SQL(
  377. "COMMENT ON CONSTRAINT %s ON %s IS %s",
  378. SQL.identifier(constraintname), SQL.identifier(tablename), definition,
  379. )
  380. try:
  381. with cr.savepoint(flush=False):
  382. cr.execute(query1, log_exceptions=False)
  383. cr.execute(query2, log_exceptions=False)
  384. _schema.debug("Table %r: added constraint %r as %s", tablename, constraintname, definition)
  385. except Exception:
  386. raise Exception("Table %r: unable to add constraint %r as %s", tablename, constraintname, definition)
  387. def drop_constraint(cr, tablename, constraintname):
  388. """ drop the given constraint. """
  389. try:
  390. with cr.savepoint(flush=False):
  391. cr.execute(SQL(
  392. "ALTER TABLE %s DROP CONSTRAINT %s",
  393. SQL.identifier(tablename), SQL.identifier(constraintname),
  394. ))
  395. _schema.debug("Table %r: dropped constraint %r", tablename, constraintname)
  396. except Exception:
  397. _schema.warning("Table %r: unable to drop constraint %r!", tablename, constraintname)
  398. def add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
  399. """ Create the given foreign key, and return ``True``. """
  400. cr.execute(SQL(
  401. "ALTER TABLE %s ADD FOREIGN KEY (%s) REFERENCES %s(%s) ON DELETE %s",
  402. SQL.identifier(tablename1), SQL.identifier(columnname1),
  403. SQL.identifier(tablename2), SQL.identifier(columnname2),
  404. SQL(ondelete),
  405. ))
  406. _schema.debug("Table %r: added foreign key %r references %r(%r) ON DELETE %s",
  407. tablename1, columnname1, tablename2, columnname2, ondelete)
  408. return True
  409. def get_foreign_keys(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
  410. deltype = _CONFDELTYPES[ondelete.upper()]
  411. cr.execute(SQL(
  412. """
  413. SELECT fk.conname as name
  414. FROM pg_constraint AS fk
  415. JOIN pg_class AS c1 ON fk.conrelid = c1.oid
  416. JOIN pg_class AS c2 ON fk.confrelid = c2.oid
  417. JOIN pg_attribute AS a1 ON a1.attrelid = c1.oid AND fk.conkey[1] = a1.attnum
  418. JOIN pg_attribute AS a2 ON a2.attrelid = c2.oid AND fk.confkey[1] = a2.attnum
  419. WHERE fk.contype = 'f'
  420. AND c1.relname = %s
  421. AND a1.attname = %s
  422. AND c2.relname = %s
  423. AND a2.attname = %s
  424. AND fk.confdeltype = %s
  425. """,
  426. tablename1, columnname1, tablename2, columnname2, deltype,
  427. ))
  428. return [r[0] for r in cr.fetchall()]
  429. def fix_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete):
  430. """ Update the foreign keys between tables to match the given one, and
  431. return ``True`` if the given foreign key has been recreated.
  432. """
  433. # Do not use 'information_schema' here, as those views are awfully slow!
  434. deltype = _CONFDELTYPES.get(ondelete.upper(), 'a')
  435. cr.execute(SQL(
  436. """ SELECT con.conname, c2.relname, a2.attname, con.confdeltype as deltype
  437. FROM pg_constraint as con, pg_class as c1, pg_class as c2,
  438. pg_attribute as a1, pg_attribute as a2
  439. WHERE con.contype='f' AND con.conrelid=c1.oid AND con.confrelid=c2.oid
  440. AND array_lower(con.conkey, 1)=1 AND con.conkey[1]=a1.attnum
  441. AND array_lower(con.confkey, 1)=1 AND con.confkey[1]=a2.attnum
  442. AND a1.attrelid=c1.oid AND a2.attrelid=c2.oid
  443. AND c1.relname=%s AND a1.attname=%s """,
  444. tablename1, columnname1,
  445. ))
  446. found = False
  447. for fk in cr.fetchall():
  448. if not found and fk[1:] == (tablename2, columnname2, deltype):
  449. found = True
  450. else:
  451. drop_constraint(cr, tablename1, fk[0])
  452. if not found:
  453. return add_foreign_key(cr, tablename1, columnname1, tablename2, columnname2, ondelete)
  454. def index_exists(cr, indexname):
  455. """ Return whether the given index exists. """
  456. cr.execute(SQL("SELECT 1 FROM pg_indexes WHERE indexname=%s", indexname))
  457. return cr.rowcount
  458. def check_index_exist(cr, indexname):
  459. assert index_exists(cr, indexname), f"{indexname} does not exist"
  460. def create_index(cr, indexname, tablename, expressions, method='btree', where=''):
  461. """ Create the given index unless it exists. """
  462. if index_exists(cr, indexname):
  463. return
  464. cr.execute(SQL(
  465. "CREATE INDEX %s ON %s USING %s (%s)%s",
  466. SQL.identifier(indexname),
  467. SQL.identifier(tablename),
  468. SQL(method),
  469. SQL(", ").join(SQL(expression) for expression in expressions),
  470. SQL(" WHERE %s", SQL(where)) if where else SQL(),
  471. ))
  472. _schema.debug("Table %r: created index %r (%s)", tablename, indexname, ", ".join(expressions))
  473. def create_unique_index(cr, indexname, tablename, expressions):
  474. """ Create the given index unless it exists. """
  475. if index_exists(cr, indexname):
  476. return
  477. cr.execute(SQL(
  478. "CREATE UNIQUE INDEX %s ON %s (%s)",
  479. SQL.identifier(indexname),
  480. SQL.identifier(tablename),
  481. SQL(", ").join(SQL(expression) for expression in expressions),
  482. ))
  483. _schema.debug("Table %r: created index %r (%s)", tablename, indexname, ", ".join(expressions))
  484. def drop_index(cr, indexname, tablename):
  485. """ Drop the given index if it exists. """
  486. cr.execute(SQL("DROP INDEX IF EXISTS %s", SQL.identifier(indexname)))
  487. _schema.debug("Table %r: dropped index %r", tablename, indexname)
  488. def drop_view_if_exists(cr, viewname):
  489. kind = table_kind(cr, viewname)
  490. if kind == TableKind.View:
  491. cr.execute(SQL("DROP VIEW %s CASCADE", SQL.identifier(viewname)))
  492. elif kind == TableKind.Materialized:
  493. cr.execute(SQL("DROP MATERIALIZED VIEW %s CASCADE", SQL.identifier(viewname)))
  494. def escape_psql(to_escape):
  495. return to_escape.replace('\\', r'\\').replace('%', r'\%').replace('_', r'\_')
  496. def pg_varchar(size=0):
  497. """ Returns the VARCHAR declaration for the provided size:
  498. * If no size (or an empty or negative size is provided) return an
  499. 'infinite' VARCHAR
  500. * Otherwise return a VARCHAR(n)
  501. :param int size: varchar size, optional
  502. :rtype: str
  503. """
  504. if size:
  505. if not isinstance(size, int):
  506. raise ValueError("VARCHAR parameter should be an int, got %s" % type(size))
  507. if size > 0:
  508. return 'VARCHAR(%d)' % size
  509. return 'VARCHAR'
  510. def reverse_order(order):
  511. """ Reverse an ORDER BY clause """
  512. items = []
  513. for item in order.split(','):
  514. item = item.lower().split()
  515. direction = 'asc' if item[1:] == ['desc'] else 'desc'
  516. items.append('%s %s' % (item[0], direction))
  517. return ', '.join(items)
  518. def increment_fields_skiplock(records, *fields):
  519. """
  520. Increment 'friendly' the given `fields` of the current `records`.
  521. If record is locked, we just skip the update.
  522. It doesn't invalidate the cache since the update is not critical.
  523. :param records: recordset to update
  524. :param fields: integer fields to increment
  525. :returns: whether the specified fields were incremented on any record.
  526. :rtype: bool
  527. """
  528. if not records:
  529. return False
  530. for field in fields:
  531. assert records._fields[field].type == 'integer'
  532. cr = records._cr
  533. tablename = records._table
  534. cr.execute(SQL(
  535. """
  536. UPDATE %s
  537. SET %s
  538. WHERE id IN (SELECT id FROM %s WHERE id = ANY(%s) FOR UPDATE SKIP LOCKED)
  539. """,
  540. SQL.identifier(tablename),
  541. SQL(', ').join(
  542. SQL("%s = COALESCE(%s, 0) + 1", SQL.identifier(field), SQL.identifier(field))
  543. for field in fields
  544. ),
  545. SQL.identifier(tablename),
  546. records.ids,
  547. ))
  548. return bool(cr.rowcount)
  549. def value_to_translated_trigram_pattern(value):
  550. """ Escape value to match a translated field's trigram index content
  551. The trigram index function jsonb_path_query_array("column_name", '$.*')::text
  552. uses all translations' representations to build the indexed text. So the
  553. original text needs to be JSON-escaped correctly to match it.
  554. :param str value: value provided in domain
  555. :return: a pattern to match the indexed text
  556. """
  557. if len(value) < 3:
  558. # matching less than 3 characters will not take advantage of the index
  559. return '%'
  560. # apply JSON escaping to value; the argument ensure_ascii=False prevents
  561. # json.dumps from escaping unicode to ascii, which is consistent with the
  562. # index function jsonb_path_query_array("column_name", '$.*')::text
  563. json_escaped = json.dumps(value, ensure_ascii=False)[1:-1]
  564. # apply PG wildcard escaping to JSON-escaped text
  565. wildcard_escaped = re.sub(r'(_|%|\\)', r'\\\1', json_escaped)
  566. # add wildcards around it to get the pattern
  567. return f"%{wildcard_escaped}%"
  568. def pattern_to_translated_trigram_pattern(pattern):
  569. """ Escape pattern to match a translated field's trigram index content
  570. The trigram index function jsonb_path_query_array("column_name", '$.*')::text
  571. uses all translations' representations to build the indexed text. So the
  572. original pattern needs to be JSON-escaped correctly to match it.
  573. :param str pattern: value provided in domain
  574. :return: a pattern to match the indexed text
  575. """
  576. # find the parts around (non-escaped) wildcard characters (_, %)
  577. sub_patterns = re.findall(r'''
  578. (
  579. (?:.)*? # 0 or more charaters including the newline character
  580. (?<!\\)(?:\\\\)* # 0 or even number of backslashes to promise the next wildcard character is not escaped
  581. )
  582. (?:_|%|$) # a non-escaped wildcard charater or end of the string
  583. ''', pattern, flags=re.VERBOSE | re.DOTALL)
  584. # unescape PG wildcards from each sub pattern (\% becomes %)
  585. sub_texts = [re.sub(r'\\(.|$)', r'\1', t, flags=re.DOTALL) for t in sub_patterns]
  586. # apply JSON escaping to sub texts having at least 3 characters (" becomes \");
  587. # the argument ensure_ascii=False prevents from escaping unicode to ascii
  588. json_escaped = [json.dumps(t, ensure_ascii=False)[1:-1] for t in sub_texts if len(t) >= 3]
  589. # apply PG wildcard escaping to JSON-escaped texts (% becomes \%)
  590. wildcard_escaped = [re.sub(r'(_|%|\\)', r'\\\1', t) for t in json_escaped]
  591. # replace the original wildcard characters by %
  592. return f"%{'%'.join(wildcard_escaped)}%" if wildcard_escaped else "%"
  593. def make_identifier(identifier: str) -> str:
  594. """ Return ``identifier``, possibly modified to fit PostgreSQL's identifier size limitation.
  595. If too long, ``identifier`` is truncated and padded with a hash to make it mostly unique.
  596. """
  597. # if length exceeds the PostgreSQL limit of 63 characters.
  598. if len(identifier) > 63:
  599. # We have to fit a crc32 hash and one underscore into a 63 character
  600. # alias. The remaining space we can use to add a human readable prefix.
  601. return f"{identifier[:54]}_{crc32(identifier.encode()):08x}"
  602. return identifier
  603. def make_index_name(table_name: str, column_name: str) -> str:
  604. """ Return an index name according to conventions for the given table and column. """
  605. return make_identifier(f"{table_name}__{column_name}_index")
上海开阖软件有限公司 沪ICP备12045867号-1