gooderp18绿色标准版
Вы не можете выбрать более 25 тем Темы должны начинаться с буквы или цифры, могут содержать дефисы(-) и должны содержать не более 35 символов.

7552 lines
329KB

  1. # -*- coding: utf-8 -*-
  2. # Part of Odoo. See LICENSE file for full copyright and licensing details.
  3. """
  4. Object Relational Mapping module:
  5. * Hierarchical structure
  6. * Constraints consistency and validation
  7. * Object metadata depends on its status
  8. * Optimised processing by complex query (multiple actions at once)
  9. * Default field values
  10. * Permissions optimisation
  11. * Persistent object: DB postgresql
  12. * Data conversion
  13. * Multi-level caching system
  14. * Two different inheritance mechanisms
  15. * Rich set of field types:
  16. - classical (varchar, integer, boolean, ...)
  17. - relational (one2many, many2one, many2many)
  18. - functional
  19. """
  20. from __future__ import annotations
  21. import collections
  22. import contextlib
  23. import datetime
  24. import functools
  25. import inspect
  26. import itertools
  27. import io
  28. import json
  29. import logging
  30. import operator
  31. import pytz
  32. import re
  33. import uuid
  34. import warnings
  35. from collections import defaultdict, deque
  36. from collections.abc import MutableMapping, Callable
  37. from contextlib import closing
  38. from inspect import getmembers
  39. from operator import attrgetter, itemgetter
  40. import babel
  41. import babel.dates
  42. import dateutil.relativedelta
  43. import psycopg2
  44. import psycopg2.extensions
  45. from psycopg2.extras import Json
  46. import odoo
  47. from . import SUPERUSER_ID
  48. from . import api
  49. from . import tools
  50. from .api import NewId
  51. from .exceptions import AccessError, MissingError, ValidationError, UserError
  52. from .tools import (
  53. clean_context, config, date_utils, discardattr,
  54. DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, format_list,
  55. frozendict, get_lang, lazy_classproperty, OrderedSet,
  56. ormcache, partition, Query, split_every, unique,
  57. SQL, sql,
  58. )
  59. from .tools.lru import LRU
  60. from .tools.misc import LastOrderedSet, ReversedIterable, unquote
  61. from .tools.translate import _, LazyTranslate
  62. import typing
  63. if typing.TYPE_CHECKING:
  64. from collections.abc import Reversible
  65. from .modules.registry import Registry
  66. from odoo.api import Self, ValuesType, IdType
  67. _lt = LazyTranslate('base')
  68. _logger = logging.getLogger(__name__)
  69. _unlink = logging.getLogger(__name__ + '.unlink')
  70. regex_alphanumeric = re.compile(r'^[a-z0-9_]+$')
  71. regex_order = re.compile(r'''
  72. ^
  73. (\s*
  74. (?P<term>((?P<field>[a-z0-9_]+|"[a-z0-9_]+")(\.(?P<property>[a-z0-9_]+))?(:(?P<func>[a-z_]+))?))
  75. (\s+(?P<direction>desc|asc))?
  76. (\s+(?P<nulls>nulls\ first|nulls\ last))?
  77. \s*
  78. (,|$)
  79. )+
  80. (?<!,)
  81. $
  82. ''', re.IGNORECASE | re.VERBOSE)
  83. regex_object_name = re.compile(r'^[a-z0-9_.]+$')
  84. regex_pg_name = re.compile(r'^[a-z_][a-z0-9_$]*$', re.I)
  85. regex_field_agg = re.compile(r'(\w+)(?::(\w+)(?:\((\w+)\))?)?') # For read_group
  86. regex_read_group_spec = re.compile(r'(\w+)(\.(\w+))?(?::(\w+))?$') # For _read_group
  87. AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
  88. GC_UNLINK_LIMIT = 100_000
  89. INSERT_BATCH_SIZE = 100
  90. UPDATE_BATCH_SIZE = 100
  91. SQL_DEFAULT = psycopg2.extensions.AsIs("DEFAULT")
  92. def parse_read_group_spec(spec: str) -> tuple:
  93. """ Return a triplet corresponding to the given groupby/path/aggregate specification. """
  94. res_match = regex_read_group_spec.match(spec)
  95. if not res_match:
  96. raise ValueError(
  97. f'Invalid aggregate/groupby specification {spec!r}.\n'
  98. '- Valid aggregate specification looks like "<field_name>:<agg>" example: "quantity:sum".\n'
  99. '- Valid groupby specification looks like "<no_datish_field_name>" or "<datish_field_name>:<granularity>" example: "date:month" or "<properties_field_name>.<property>:<granularity>".'
  100. )
  101. groups = res_match.groups()
  102. return groups[0], groups[2], groups[3]
  103. def check_object_name(name):
  104. """ Check if the given name is a valid model name.
  105. The _name attribute in osv and osv_memory object is subject to
  106. some restrictions. This function returns True or False whether
  107. the given name is allowed or not.
  108. TODO: this is an approximation. The goal in this approximation
  109. is to disallow uppercase characters (in some places, we quote
  110. table/column names and in other not, which leads to this kind
  111. of errors:
  112. psycopg2.ProgrammingError: relation "xxx" does not exist).
  113. The same restriction should apply to both osv and osv_memory
  114. objects for consistency.
  115. """
  116. if regex_object_name.match(name) is None:
  117. return False
  118. return True
  119. def raise_on_invalid_object_name(name):
  120. if not check_object_name(name):
  121. msg = "The _name attribute %s is not valid." % name
  122. raise ValueError(msg)
  123. def check_pg_name(name):
  124. """ Check whether the given name is a valid PostgreSQL identifier name. """
  125. if not regex_pg_name.match(name):
  126. raise ValidationError("Invalid characters in table name %r" % name)
  127. if len(name) > 63:
  128. raise ValidationError("Table name %r is too long" % name)
  129. # match private methods, to prevent their remote invocation
  130. regex_private = re.compile(r'^(_.*|init)$')
  131. def check_method_name(name):
  132. """ Raise an ``AccessError`` if ``name`` is a private method name. """
  133. if regex_private.match(name):
  134. raise AccessError(_lt('Private methods (such as %s) cannot be called remotely.', name))
  135. def check_property_field_value_name(property_name):
  136. if not regex_alphanumeric.match(property_name) or len(property_name) > 512:
  137. raise ValueError(f"Wrong property field value name {property_name!r}.")
  138. def fix_import_export_id_paths(fieldname):
  139. """
  140. Fixes the id fields in import and exports, and splits field paths
  141. on '/'.
  142. :param str fieldname: name of the field to import/export
  143. :return: split field name
  144. :rtype: list of str
  145. """
  146. fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
  147. fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
  148. return fixed_external_id.split('/')
  149. def to_company_ids(companies):
  150. if isinstance(companies, BaseModel):
  151. return companies.ids
  152. elif isinstance(companies, (list, tuple, str)):
  153. return companies
  154. return [companies]
  155. def check_company_domain_parent_of(self, companies):
  156. """ A `_check_company_domain` function that lets a record be used if either:
  157. - record.company_id = False (which implies that it is shared between all companies), or
  158. - record.company_id is a parent of any of the given companies.
  159. """
  160. if isinstance(companies, str):
  161. return ['|', ('company_id', '=', False), ('company_id', 'parent_of', companies)]
  162. companies = [id for id in to_company_ids(companies) if id]
  163. if not companies:
  164. return [('company_id', '=', False)]
  165. return [('company_id', 'in', [
  166. int(parent)
  167. for rec in self.env['res.company'].sudo().browse(companies)
  168. for parent in rec.parent_path.split('/')[:-1]
  169. ] + [False])]
  170. def check_companies_domain_parent_of(self, companies):
  171. """ A `_check_company_domain` function that lets a record be used if
  172. any company in record.company_ids is a parent of any of the given companies.
  173. """
  174. if isinstance(companies, str):
  175. return [('company_ids', 'parent_of', companies)]
  176. companies = [id_ for id_ in to_company_ids(companies) if id_]
  177. if not companies:
  178. return []
  179. return [('company_ids', 'in', [
  180. int(parent)
  181. for rec in self.env['res.company'].sudo().browse(companies)
  182. for parent in rec.parent_path.split('/')[:-1]
  183. ])]
  184. class MetaModel(api.Meta):
  185. """ The metaclass of all model classes.
  186. Its main purpose is to register the models per module.
  187. """
  188. module_to_models = defaultdict(list)
  189. def __new__(meta, name, bases, attrs):
  190. # this prevents assignment of non-fields on recordsets
  191. attrs.setdefault('__slots__', ())
  192. # this collects the fields defined on the class (via Field.__set_name__())
  193. attrs.setdefault('_field_definitions', [])
  194. if attrs.get('_register', True):
  195. # determine '_module'
  196. if '_module' not in attrs:
  197. module = attrs['__module__']
  198. assert module.startswith('odoo.addons.'), \
  199. f"Invalid import of {module}.{name}, it should start with 'odoo.addons'."
  200. attrs['_module'] = module.split('.')[2]
  201. # determine model '_name' and normalize '_inherits'
  202. inherit = attrs.get('_inherit', ())
  203. if isinstance(inherit, str):
  204. inherit = attrs['_inherit'] = [inherit]
  205. if '_name' not in attrs:
  206. attrs['_name'] = inherit[0] if len(inherit) == 1 else name
  207. return super().__new__(meta, name, bases, attrs)
  208. def __init__(self, name, bases, attrs):
  209. super().__init__(name, bases, attrs)
  210. if '__init__' in attrs and len(inspect.signature(attrs['__init__']).parameters) != 4:
  211. _logger.warning("The method %s.__init__ doesn't match the new signature in module %s", name, attrs.get('__module__'))
  212. if not attrs.get('_register', True):
  213. return
  214. # Remember which models to instantiate for this module.
  215. if self._module:
  216. self.module_to_models[self._module].append(self)
  217. if not self._abstract and self._name not in self._inherit:
  218. # this class defines a model: add magic fields
  219. def add(name, field):
  220. setattr(self, name, field)
  221. field.__set_name__(self, name)
  222. def add_default(name, field):
  223. if name not in attrs:
  224. setattr(self, name, field)
  225. field.__set_name__(self, name)
  226. add('id', fields.Id(automatic=True))
  227. add_default('display_name', fields.Char(
  228. string='Display Name', automatic=True,
  229. compute='_compute_display_name',
  230. search='_search_display_name',
  231. ))
  232. if attrs.get('_log_access', self._auto):
  233. add_default('create_uid', fields.Many2one(
  234. 'res.users', string='Created by', automatic=True, readonly=True))
  235. add_default('create_date', fields.Datetime(
  236. string='Created on', automatic=True, readonly=True))
  237. add_default('write_uid', fields.Many2one(
  238. 'res.users', string='Last Updated by', automatic=True, readonly=True))
  239. add_default('write_date', fields.Datetime(
  240. string='Last Updated on', automatic=True, readonly=True))
  241. def origin_ids(ids):
  242. """ Return an iterator over the origin ids corresponding to ``ids``.
  243. Actual ids are returned as is, and ids without origin are not returned.
  244. """
  245. return ((id_ or id_.origin) for id_ in ids if (id_ or getattr(id_, "origin", None)))
  246. class OriginIds:
  247. """ A reversible iterable returning the origin ids of a collection of ``ids``. """
  248. __slots__ = ['ids']
  249. def __init__(self, ids):
  250. self.ids = ids
  251. def __iter__(self):
  252. return origin_ids(self.ids)
  253. def __reversed__(self):
  254. return origin_ids(reversed(self.ids))
  255. def expand_ids(id0, ids):
  256. """ Return an iterator of unique ids from the concatenation of ``[id0]`` and
  257. ``ids``, and of the same kind (all real or all new).
  258. """
  259. yield id0
  260. seen = {id0}
  261. kind = bool(id0)
  262. for id_ in ids:
  263. if id_ not in seen and bool(id_) == kind:
  264. yield id_
  265. seen.add(id_)
  266. # maximum number of prefetched records
  267. PREFETCH_MAX = 1000
  268. # special columns automatically created by the ORM
  269. LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
  270. MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS
  271. # read_group stuff
  272. READ_GROUP_TIME_GRANULARITY = {
  273. 'hour': dateutil.relativedelta.relativedelta(hours=1),
  274. 'day': dateutil.relativedelta.relativedelta(days=1),
  275. 'week': datetime.timedelta(days=7),
  276. 'month': dateutil.relativedelta.relativedelta(months=1),
  277. 'quarter': dateutil.relativedelta.relativedelta(months=3),
  278. 'year': dateutil.relativedelta.relativedelta(years=1)
  279. }
  280. READ_GROUP_NUMBER_GRANULARITY = {
  281. 'year_number': 'year',
  282. 'quarter_number': 'quarter',
  283. 'month_number': 'month',
  284. 'iso_week_number': 'week', # ISO week number because anything else than ISO is nonsense
  285. 'day_of_year': 'doy',
  286. 'day_of_month': 'day',
  287. 'day_of_week': 'dow',
  288. 'hour_number': 'hour',
  289. 'minute_number': 'minute',
  290. 'second_number': 'second',
  291. }
  292. READ_GROUP_ALL_TIME_GRANULARITY = READ_GROUP_TIME_GRANULARITY | READ_GROUP_NUMBER_GRANULARITY
  293. # valid SQL aggregation functions
  294. READ_GROUP_AGGREGATE = {
  295. 'sum': lambda table, expr: SQL('SUM(%s)', expr),
  296. 'avg': lambda table, expr: SQL('AVG(%s)', expr),
  297. 'max': lambda table, expr: SQL('MAX(%s)', expr),
  298. 'min': lambda table, expr: SQL('MIN(%s)', expr),
  299. 'bool_and': lambda table, expr: SQL('BOOL_AND(%s)', expr),
  300. 'bool_or': lambda table, expr: SQL('BOOL_OR(%s)', expr),
  301. 'array_agg': lambda table, expr: SQL('ARRAY_AGG(%s ORDER BY %s)', expr, SQL.identifier(table, 'id')),
  302. # 'recordset' aggregates will be post-processed to become recordsets
  303. 'recordset': lambda table, expr: SQL('ARRAY_AGG(%s ORDER BY %s)', expr, SQL.identifier(table, 'id')),
  304. 'count': lambda table, expr: SQL('COUNT(%s)', expr),
  305. 'count_distinct': lambda table, expr: SQL('COUNT(DISTINCT %s)', expr),
  306. }
  307. READ_GROUP_DISPLAY_FORMAT = {
  308. # Careful with week/year formats:
  309. # - yyyy (lower) must always be used, *except* for week+year formats
  310. # - YYYY (upper) must always be used for week+year format
  311. # e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
  312. # and W1 2006 for others
  313. #
  314. # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
  315. # such as 2006-01-01 being formatted as "January 2005" in some locales.
  316. # Cfr: http://babel.pocoo.org/en/latest/dates.html#date-fields
  317. 'hour': 'hh:00 dd MMM',
  318. 'day': 'dd MMM yyyy', # yyyy = normal year
  319. 'week': "'W'w YYYY", # w YYYY = ISO week-year
  320. 'month': 'MMMM yyyy',
  321. 'quarter': 'QQQ yyyy',
  322. 'year': 'yyyy',
  323. }
  324. # THE DEFINITION AND REGISTRY CLASSES
  325. #
  326. # The framework deals with two kinds of classes for models: the "definition"
  327. # classes and the "registry" classes.
  328. #
  329. # The "definition" classes are the ones defined in modules source code: they
  330. # define models and extend them. Those classes are essentially "static", for
  331. # whatever that means in Python. The only exception is custom models: their
  332. # definition class is created dynamically.
  333. #
  334. # The "registry" classes are the ones you find in the registry. They are the
  335. # actual classes of the recordsets of their model. The "registry" class of a
  336. # model is created dynamically when the registry is built. It inherits (in the
  337. # Python sense) from all the definition classes of the model, and possibly other
  338. # registry classes (when the model inherits from another model). It also
  339. # carries model metadata inferred from its parent classes.
  340. #
  341. #
  342. # THE REGISTRY CLASS OF A MODEL
  343. #
  344. # In the simplest case, a model's registry class inherits from all the classes
  345. # that define the model in a flat hierarchy. Consider the model definition
  346. # below. The registry class of model 'a' inherits from the definition classes
  347. # A1, A2, A3, in reverse order, to match the expected overriding order. The
  348. # registry class carries inferred metadata that is shared between all the
  349. # model's instances for a given registry.
  350. #
  351. # class A1(Model): Model
  352. # _name = 'a' / | \
  353. # A3 A2 A1 <- definition classes
  354. # class A2(Model): \ | /
  355. # _inherit = 'a' a <- registry class: registry['a']
  356. # |
  357. # class A3(Model): records <- model instances, like env['a']
  358. # _inherit = 'a'
  359. #
  360. # Note that when the model inherits from another model, we actually make the
  361. # registry classes inherit from each other, so that extensions to an inherited
  362. # model are visible in the registry class of the child model, like in the
  363. # following example.
  364. #
  365. # class A1(Model):
  366. # _name = 'a' Model
  367. # / / \ \
  368. # class B1(Model): / / \ \
  369. # _name = 'b' / A2 A1 \
  370. # B2 \ / B1
  371. # class B2(Model): \ \ / /
  372. # _name = 'b' \ a /
  373. # _inherit = ['a', 'b'] \ | /
  374. # \ | /
  375. # class A2(Model): b
  376. # _inherit = 'a'
  377. #
  378. #
  379. # THE FIELDS OF A MODEL
  380. #
  381. # The fields of a model are given by the model's definition classes, inherited
  382. # models ('_inherit' and '_inherits') and other parties, like custom fields.
  383. # Note that a field can be partially overridden when it appears on several
  384. # definition classes of its model. In that case, the field's final definition
  385. # depends on the presence or absence of each definition class, which itself
  386. # depends on the modules loaded in the registry.
  387. #
  388. # By design, the registry class has access to all the fields on the model's
  389. # definition classes. When possible, the field is used directly from the
  390. # model's registry class. There are a number of cases where the field cannot be
  391. # used directly:
  392. # - the field is related (and bits may not be shared);
  393. # - the field is overridden on definition classes;
  394. # - the field is defined for another model (and accessible by mixin).
  395. #
  396. # The last case prevents sharing the field, because the field object is specific
  397. # to a model, and is used as a key in several key dictionaries, like the record
  398. # cache and pending computations.
  399. #
  400. # Setting up a field on its definition class helps saving memory and time.
  401. # Indeed, when sharing is possible, the field's setup is almost entirely done
  402. # where the field was defined. It is thus done when the definition class was
  403. # created, and it may be reused across registries.
  404. #
  405. # In the example below, the field 'foo' appears once on its model's definition
  406. # classes. Assuming that it is not related, that field can be set up directly
  407. # on its definition class. If the model appears in several registries, the
  408. # field 'foo' is effectively shared across registries.
  409. #
  410. # class A1(Model): Model
  411. # _name = 'a' / \
  412. # foo = ... / \
  413. # bar = ... A2 A1
  414. # bar foo, bar
  415. # class A2(Model): \ /
  416. # _inherit = 'a' \ /
  417. # bar = ... a
  418. # bar
  419. #
  420. # On the other hand, the field 'bar' is overridden in its model's definition
  421. # classes. In that case, the framework recreates the field on the model's
  422. # registry class. The field's setup will be based on its definitions, and will
  423. # not be shared across registries.
  424. #
  425. # The so-called magic fields ('id', 'display_name', ...) used to be added on
  426. # registry classes. But doing so prevents them from being shared. So instead,
  427. # we add them on definition classes that define a model without extending it.
  428. # This increases the number of fields that are shared across registries.
  429. def is_definition_class(cls):
  430. """ Return whether ``cls`` is a model definition class. """
  431. return isinstance(cls, MetaModel) and getattr(cls, 'pool', None) is None
  432. def is_registry_class(cls):
  433. """ Return whether ``cls`` is a model registry class. """
  434. return getattr(cls, 'pool', None) is not None
  435. class BaseModel(metaclass=MetaModel):
  436. """Base class for Odoo models.
  437. Odoo models are created by inheriting one of the following:
  438. * :class:`Model` for regular database-persisted models
  439. * :class:`TransientModel` for temporary data, stored in the database but
  440. automatically vacuumed every so often
  441. * :class:`AbstractModel` for abstract super classes meant to be shared by
  442. multiple inheriting models
  443. The system automatically instantiates every model once per database. Those
  444. instances represent the available models on each database, and depend on
  445. which modules are installed on that database. The actual class of each
  446. instance is built from the Python classes that create and inherit from the
  447. corresponding model.
  448. Every model instance is a "recordset", i.e., an ordered collection of
  449. records of the model. Recordsets are returned by methods like
  450. :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
  451. explicit representation: a record is represented as a recordset of one
  452. record.
  453. To create a class that should not be instantiated,
  454. the :attr:`~odoo.models.BaseModel._register` attribute may be set to False.
  455. """
  456. __slots__ = ['env', '_ids', '_prefetch_ids']
  457. env: api.Environment
  458. id: IdType | typing.Literal[False]
  459. display_name: str | typing.Literal[False]
  460. pool: Registry
  461. _fields: dict[str, Field]
  462. _auto = False
  463. """Whether a database table should be created.
  464. If set to ``False``, override :meth:`~odoo.models.BaseModel.init`
  465. to create the database table.
  466. Automatically defaults to `True` for :class:`Model` and
  467. :class:`TransientModel`, `False` for :class:`AbstractModel`.
  468. .. tip:: To create a model without any table, inherit
  469. from :class:`~odoo.models.AbstractModel`.
  470. """
  471. _register = False #: registry visibility
  472. _abstract = True
  473. """ Whether the model is *abstract*.
  474. .. seealso:: :class:`AbstractModel`
  475. """
  476. _transient = False
  477. """ Whether the model is *transient*.
  478. .. seealso:: :class:`TransientModel`
  479. """
  480. _name: str | None = None #: the model name (in dot-notation, module namespace)
  481. _description: str | None = None #: the model's informal name
  482. _module = None #: the model's module (in the Odoo sense)
  483. _custom = False #: should be True for custom models only
  484. _inherit: str | list[str] | tuple[str, ...] = ()
  485. """Python-inherited models:
  486. :type: str or list(str) or tuple(str)
  487. .. note::
  488. * If :attr:`._name` is set, name(s) of parent models to inherit from
  489. * If :attr:`._name` is unset, name of a single model to extend in-place
  490. """
  491. _inherits = frozendict()
  492. """dictionary {'parent_model': 'm2o_field'} mapping the _name of the parent business
  493. objects to the names of the corresponding foreign key fields to use::
  494. _inherits = {
  495. 'a.model': 'a_field_id',
  496. 'b.model': 'b_field_id'
  497. }
  498. implements composition-based inheritance: the new model exposes all
  499. the fields of the inherited models but stores none of them:
  500. the values themselves remain stored on the linked record.
  501. .. warning::
  502. if multiple fields with the same name are defined in the
  503. :attr:`~odoo.models.Model._inherits`-ed models, the inherited field will
  504. correspond to the last one (in the inherits list order).
  505. """
  506. _table = None #: SQL table name used by model if :attr:`_auto`
  507. _table_query = None #: SQL expression of the table's content (optional)
  508. _sql_constraints: list[tuple[str, str, str]] = [] #: SQL constraints [(name, sql_def, message)]
  509. _rec_name = None #: field to use for labeling records, default: ``name``
  510. _rec_names_search: list[str] | None = None #: fields to consider in ``name_search``
  511. _order = 'id' #: default order field for searching results
  512. _parent_name = 'parent_id' #: the many2one field used as parent field
  513. _parent_store = False
  514. """set to True to compute parent_path field.
  515. Alongside a :attr:`~.parent_path` field, sets up an indexed storage
  516. of the tree structure of records, to enable faster hierarchical queries
  517. on the records of the current model using the ``child_of`` and
  518. ``parent_of`` domain operators.
  519. """
  520. _active_name = None
  521. """field to use for active records, automatically set to either ``"active"``
  522. or ``"x_active"``.
  523. """
  524. _fold_name = 'fold' #: field to determine folded groups in kanban views
  525. _translate = True # False disables translations export for this model (Old API)
  526. _check_company_auto = False
  527. """On write and create, call ``_check_company`` to ensure companies
  528. consistency on the relational fields having ``check_company=True``
  529. as attribute.
  530. """
  531. _allow_sudo_commands = True
  532. """Allow One2many and Many2many Commands targeting this model in an environment using `sudo()` or `with_user()`.
  533. By disabling this flag, security-sensitive models protect themselves
  534. against malicious manipulation of One2many or Many2many fields
  535. through an environment using `sudo` or a more privileged user.
  536. """
  537. _depends = frozendict()
  538. """dependencies of models backed up by SQL views
  539. ``{model_name: field_names}``, where ``field_names`` is an iterable.
  540. This is only used to determine the changes to flush to database before
  541. executing ``search()`` or ``read_group()``. It won't be used for cache
  542. invalidation or recomputing fields.
  543. """
  544. # default values for _transient_vacuum()
  545. _transient_max_count = lazy_classproperty(lambda _: config.get('osv_memory_count_limit'))
  546. "maximum number of transient records, unlimited if ``0``"
  547. _transient_max_hours = lazy_classproperty(lambda _: config.get('transient_age_limit'))
  548. "maximum idle lifetime (in hours), unlimited if ``0``"
  549. def _valid_field_parameter(self, field, name):
  550. """ Return whether the given parameter name is valid for the field. """
  551. return name == 'related_sudo'
  552. @api.model
  553. def _add_field(self, name, field):
  554. """ Add the given ``field`` under the given ``name`` in the class """
  555. cls = self.env.registry[self._name]
  556. # Assert the name is an existing field in the model, or any model in the _inherits
  557. # or a custom field (starting by `x_`)
  558. is_class_field = any(
  559. isinstance(getattr(model, name, None), fields.Field)
  560. for model in [cls] + [self.env.registry[inherit] for inherit in cls._inherits]
  561. )
  562. if not (is_class_field or self.env['ir.model.fields']._is_manual_name(name)):
  563. raise ValidationError(
  564. f"The field `{name}` is not defined in the `{cls._name}` Python class and does not start with 'x_'"
  565. )
  566. # Assert the attribute to assign is a Field
  567. if not isinstance(field, fields.Field):
  568. raise ValidationError("You can only add `fields.Field` objects to a model fields")
  569. if not isinstance(getattr(cls, name, field), Field):
  570. _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
  571. setattr(cls, name, field)
  572. field._toplevel = True
  573. field.__set_name__(cls, name)
  574. # add field as an attribute and in cls._fields (for reflection)
  575. cls._fields[name] = field
  576. @api.model
  577. def _pop_field(self, name):
  578. """ Remove the field with the given ``name`` from the model.
  579. This method should only be used for manual fields.
  580. """
  581. cls = self.env.registry[self._name]
  582. field = cls._fields.pop(name, None)
  583. discardattr(cls, name)
  584. if cls._rec_name == name:
  585. # fixup _rec_name and display_name's dependencies
  586. cls._rec_name = None
  587. if cls.display_name in cls.pool.field_depends:
  588. cls.pool.field_depends[cls.display_name] = tuple(
  589. dep for dep in cls.pool.field_depends[cls.display_name] if dep != name
  590. )
  591. return field
  592. #
  593. # Goal: try to apply inheritance at the instantiation level and
  594. # put objects in the pool var
  595. #
  596. @classmethod
  597. def _build_model(cls, pool, cr):
  598. """ Instantiate a given model in the registry.
  599. This method creates or extends a "registry" class for the given model.
  600. This "registry" class carries inferred model metadata, and inherits (in
  601. the Python sense) from all classes that define the model, and possibly
  602. other registry classes.
  603. """
  604. if getattr(cls, '_constraints', None):
  605. _logger.warning("Model attribute '_constraints' is no longer supported, "
  606. "please use @api.constrains on methods instead.")
  607. # Keep links to non-inherited constraints in cls; this is useful for
  608. # instance when exporting translations
  609. cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
  610. # all models except 'base' implicitly inherit from 'base'
  611. name = cls._name
  612. parents = list(cls._inherit)
  613. if name != 'base':
  614. parents.append('base')
  615. # create or retrieve the model's class
  616. if name in parents:
  617. if name not in pool:
  618. raise TypeError("Model %r does not exist in registry." % name)
  619. ModelClass = pool[name]
  620. ModelClass._build_model_check_base(cls)
  621. check_parent = ModelClass._build_model_check_parent
  622. else:
  623. ModelClass = type(name, (cls,), {
  624. '_name': name,
  625. '_register': False,
  626. '_original_module': cls._module,
  627. '_inherit_module': {}, # map parent to introducing module
  628. '_inherit_children': OrderedSet(), # names of children models
  629. '_inherits_children': set(), # names of children models
  630. '_fields': {}, # populated in _setup_base()
  631. })
  632. check_parent = cls._build_model_check_parent
  633. # determine all the classes the model should inherit from
  634. bases = LastOrderedSet([cls])
  635. for parent in parents:
  636. if parent not in pool:
  637. raise TypeError("Model %r inherits from non-existing model %r." % (name, parent))
  638. parent_class = pool[parent]
  639. if parent == name:
  640. for base in parent_class.__base_classes:
  641. bases.add(base)
  642. else:
  643. check_parent(cls, parent_class)
  644. bases.add(parent_class)
  645. ModelClass._inherit_module[parent] = cls._module
  646. parent_class._inherit_children.add(name)
  647. # ModelClass.__bases__ must be assigned those classes; however, this
  648. # operation is quite slow, so we do it once in method _prepare_setup()
  649. ModelClass.__base_classes = tuple(bases)
  650. # determine the attributes of the model's class
  651. ModelClass._build_model_attributes(pool)
  652. check_pg_name(ModelClass._table)
  653. # Transience
  654. if ModelClass._transient:
  655. assert ModelClass._log_access, \
  656. "TransientModels must have log_access turned on, " \
  657. "in order to implement their vacuum policy"
  658. # link the class to the registry, and update the registry
  659. ModelClass.pool = pool
  660. pool[name] = ModelClass
  661. return ModelClass
  662. @classmethod
  663. def _build_model_check_base(model_class, cls):
  664. """ Check whether ``model_class`` can be extended with ``cls``. """
  665. if model_class._abstract and not cls._abstract:
  666. msg = ("%s transforms the abstract model %r into a non-abstract model. "
  667. "That class should either inherit from AbstractModel, or set a different '_name'.")
  668. raise TypeError(msg % (cls, model_class._name))
  669. if model_class._transient != cls._transient:
  670. if model_class._transient:
  671. msg = ("%s transforms the transient model %r into a non-transient model. "
  672. "That class should either inherit from TransientModel, or set a different '_name'.")
  673. else:
  674. msg = ("%s transforms the model %r into a transient model. "
  675. "That class should either inherit from Model, or set a different '_name'.")
  676. raise TypeError(msg % (cls, model_class._name))
  677. @classmethod
  678. def _build_model_check_parent(model_class, cls, parent_class):
  679. """ Check whether ``model_class`` can inherit from ``parent_class``. """
  680. if model_class._abstract and not parent_class._abstract:
  681. msg = ("In %s, the abstract model %r cannot inherit from the non-abstract model %r.")
  682. raise TypeError(msg % (cls, model_class._name, parent_class._name))
  683. @classmethod
  684. def _build_model_attributes(cls, pool):
  685. """ Initialize base model attributes. """
  686. cls._description = cls._name
  687. cls._table = cls._name.replace('.', '_')
  688. cls._log_access = cls._auto
  689. inherits = {}
  690. depends = {}
  691. cls._sql_constraints = {}
  692. for base in reversed(cls.__base_classes):
  693. if is_definition_class(base):
  694. # the following attributes are not taken from registry classes
  695. if cls._name not in base._inherit and not base._description:
  696. _logger.warning("The model %s has no _description", cls._name)
  697. cls._description = base._description or cls._description
  698. cls._table = base._table or cls._table
  699. cls._log_access = getattr(base, '_log_access', cls._log_access)
  700. inherits.update(base._inherits)
  701. for mname, fnames in base._depends.items():
  702. depends.setdefault(mname, []).extend(fnames)
  703. for cons in base._sql_constraints:
  704. cls._sql_constraints[cons[0]] = cons
  705. cls._sql_constraints = list(cls._sql_constraints.values())
  706. # avoid assigning an empty dict to save memory
  707. if inherits:
  708. cls._inherits = inherits
  709. if depends:
  710. cls._depends = depends
  711. # update _inherits_children of parent models
  712. for parent_name in cls._inherits:
  713. pool[parent_name]._inherits_children.add(cls._name)
  714. # recompute attributes of _inherit_children models
  715. for child_name in cls._inherit_children:
  716. child_class = pool[child_name]
  717. child_class._build_model_attributes(pool)
  718. @classmethod
  719. def _init_constraints_onchanges(cls):
  720. # store list of sql constraint qualified names
  721. for (key, _, _) in cls._sql_constraints:
  722. cls.pool._sql_constraints.add(cls._table + '_' + key)
  723. # reset properties memoized on cls
  724. cls._constraint_methods = BaseModel._constraint_methods
  725. cls._ondelete_methods = BaseModel._ondelete_methods
  726. cls._onchange_methods = BaseModel._onchange_methods
  727. @property
  728. def _table_sql(self) -> SQL:
  729. """ Return an :class:`SQL` object that represents SQL table identifier
  730. or table query.
  731. """
  732. table_query = self._table_query
  733. if table_query and isinstance(table_query, SQL):
  734. table_sql = SQL("(%s)", table_query)
  735. elif table_query:
  736. table_sql = SQL(f"({table_query})")
  737. else:
  738. table_sql = SQL.identifier(self._table)
  739. if not self._depends:
  740. return table_sql
  741. # add self._depends (and its transitive closure) as metadata to table_sql
  742. fields_to_flush = []
  743. models = [self]
  744. while models:
  745. current_model = models.pop()
  746. for model_name, field_names in current_model._depends.items():
  747. model = self.env[model_name]
  748. models.append(model)
  749. fields_to_flush.extend(model._fields[fname] for fname in field_names)
  750. return SQL().join([
  751. table_sql,
  752. *(SQL(to_flush=field) for field in fields_to_flush),
  753. ])
  754. @property
  755. def _constraint_methods(self):
  756. """ Return a list of methods implementing Python constraints. """
  757. def is_constraint(func):
  758. return callable(func) and hasattr(func, '_constrains')
  759. def wrap(func, names):
  760. # wrap func into a proxy function with explicit '_constrains'
  761. @api.constrains(*names)
  762. def wrapper(self):
  763. return func(self)
  764. return wrapper
  765. cls = self.env.registry[self._name]
  766. methods = []
  767. for attr, func in getmembers(cls, is_constraint):
  768. if callable(func._constrains):
  769. func = wrap(func, func._constrains(self))
  770. for name in func._constrains:
  771. field = cls._fields.get(name)
  772. if not field:
  773. _logger.warning("method %s.%s: @constrains parameter %r is not a field name", cls._name, attr, name)
  774. elif not (field.store or field.inverse or field.inherited):
  775. _logger.warning("method %s.%s: @constrains parameter %r is not writeable", cls._name, attr, name)
  776. methods.append(func)
  777. # optimization: memoize result on cls, it will not be recomputed
  778. cls._constraint_methods = methods
  779. return methods
  780. @property
  781. def _ondelete_methods(self):
  782. """ Return a list of methods implementing checks before unlinking. """
  783. def is_ondelete(func):
  784. return callable(func) and hasattr(func, '_ondelete')
  785. cls = self.env.registry[self._name]
  786. methods = [func for _, func in getmembers(cls, is_ondelete)]
  787. # optimization: memoize results on cls, it will not be recomputed
  788. cls._ondelete_methods = methods
  789. return methods
  790. @property
  791. def _onchange_methods(self):
  792. """ Return a dictionary mapping field names to onchange methods. """
  793. def is_onchange(func):
  794. return callable(func) and hasattr(func, '_onchange')
  795. # collect onchange methods on the model's class
  796. cls = self.env.registry[self._name]
  797. methods = defaultdict(list)
  798. for attr, func in getmembers(cls, is_onchange):
  799. missing = []
  800. for name in func._onchange:
  801. if name not in cls._fields:
  802. missing.append(name)
  803. methods[name].append(func)
  804. if missing:
  805. _logger.warning(
  806. "@api.onchange%r parameters must be field names -> not valid: %s",
  807. func._onchange, missing
  808. )
  809. # add onchange methods to implement "change_default" on fields
  810. def onchange_default(field, self):
  811. value = field.convert_to_write(self[field.name], self)
  812. condition = "%s=%s" % (field.name, value)
  813. defaults = self.env['ir.default']._get_model_defaults(self._name, condition)
  814. self.update(defaults)
  815. for name, field in cls._fields.items():
  816. if field.change_default:
  817. methods[name].append(functools.partial(onchange_default, field))
  818. # optimization: memoize result on cls, it will not be recomputed
  819. cls._onchange_methods = methods
  820. return methods
  821. def _is_an_ordinary_table(self):
  822. return self.pool.is_an_ordinary_table(self)
  823. def __ensure_xml_id(self, skip=False):
  824. """ Create missing external ids for records in ``self``, and return an
  825. iterator of pairs ``(record, xmlid)`` for the records in ``self``.
  826. :rtype: Iterable[Model, str | None]
  827. """
  828. if skip:
  829. return ((record, None) for record in self)
  830. if not self:
  831. return iter([])
  832. if not self._is_an_ordinary_table():
  833. raise Exception(
  834. "You can not export the column ID of model %s, because the "
  835. "table %s is not an ordinary table."
  836. % (self._name, self._table))
  837. modname = '__export__'
  838. cr = self.env.cr
  839. cr.execute(SQL("""
  840. SELECT res_id, module, name
  841. FROM ir_model_data
  842. WHERE model = %s AND res_id IN %s
  843. """, self._name, tuple(self.ids)))
  844. xids = {
  845. res_id: (module, name)
  846. for res_id, module, name in cr.fetchall()
  847. }
  848. def to_xid(record_id):
  849. (module, name) = xids[record_id]
  850. return ('%s.%s' % (module, name)) if module else name
  851. # create missing xml ids
  852. missing = self.filtered(lambda r: r.id not in xids)
  853. if not missing:
  854. return (
  855. (record, to_xid(record.id))
  856. for record in self
  857. )
  858. xids.update(
  859. (r.id, (modname, '%s_%s_%s' % (
  860. r._table,
  861. r.id,
  862. uuid.uuid4().hex[:8],
  863. )))
  864. for r in missing
  865. )
  866. fields = ['module', 'model', 'name', 'res_id']
  867. # disable eventual async callback / support for the extent of
  868. # the COPY FROM, as these are apparently incompatible
  869. callback = psycopg2.extensions.get_wait_callback()
  870. psycopg2.extensions.set_wait_callback(None)
  871. try:
  872. cr.copy_from(io.StringIO(
  873. u'\n'.join(
  874. u"%s\t%s\t%s\t%d" % (
  875. modname,
  876. record._name,
  877. xids[record.id][1],
  878. record.id,
  879. )
  880. for record in missing
  881. )),
  882. table='ir_model_data',
  883. columns=fields,
  884. )
  885. finally:
  886. psycopg2.extensions.set_wait_callback(callback)
  887. self.env['ir.model.data'].invalidate_model(fields)
  888. return (
  889. (record, to_xid(record.id))
  890. for record in self
  891. )
  892. def _export_rows(self, fields, *, _is_toplevel_call=True):
  893. """ Export fields of the records in ``self``.
  894. :param list fields: list of lists of fields to traverse
  895. :param bool _is_toplevel_call:
  896. used when recursing, avoid using when calling from outside
  897. :return: list of lists of corresponding values
  898. """
  899. import_compatible = self.env.context.get('import_compat', True)
  900. lines = []
  901. def splittor(rs):
  902. """ Splits the self recordset in batches of 1000 (to avoid
  903. entire-recordset-prefetch-effects) & removes the previous batch
  904. from the cache after it's been iterated in full
  905. """
  906. for idx in range(0, len(rs), 1000):
  907. sub = rs[idx:idx+1000]
  908. for rec in sub:
  909. yield rec
  910. sub.invalidate_recordset()
  911. if not _is_toplevel_call:
  912. splittor = lambda rs: rs
  913. # {properties_fname: {record: {property_name: (value, property_type)}}}
  914. cache_properties = {}
  915. def get_property(properties_fname, property_name, record):
  916. # FIXME: Only efficient during the _is_toplevel_call == True
  917. if properties_fname not in cache_properties:
  918. properties_field = self._fields[properties_fname]
  919. # each value is either None or a dict
  920. result = []
  921. for rec in self:
  922. raw_properties = rec[properties_fname]
  923. definition = properties_field._get_properties_definition(rec)
  924. if not raw_properties or not definition:
  925. result.append(definition or [])
  926. else:
  927. assert isinstance(raw_properties, dict), f"Wrong type {raw_properties!r}"
  928. result.append(properties_field._dict_to_list(raw_properties, definition))
  929. # FIXME: Far from optimal, it will fetch display_name for no reason
  930. res_ids_per_model = properties_field._get_res_ids_per_model(self, result)
  931. cache_properties[properties_fname] = record_map = {}
  932. for properties, rec in zip(result, self):
  933. properties_field._parse_json_types(properties, self.env, res_ids_per_model)
  934. record_map[rec] = prop_map = {}
  935. for prop in properties:
  936. value = prop.get('value')
  937. prop_type = prop.get('type')
  938. property_model = prop.get('comodel')
  939. if prop_type in ('many2one', 'many2many') and property_model:
  940. value = self.env[property_model].browse(value)
  941. elif prop_type == 'tags' and value:
  942. value = ",".join(
  943. next(iter(tag[1] for tag in prop['tags'] if tag[0] == v), '')
  944. for v in value
  945. )
  946. elif prop_type == 'selection':
  947. value = dict(prop['selection']).get(value, '')
  948. prop_map[prop['name']] = (value, prop_type)
  949. return cache_properties[properties_fname][record].get(property_name, ('', 'char'))
  950. # memory stable but ends up prefetching 275 fields (???)
  951. for record in splittor(self):
  952. # main line of record, initially empty
  953. current = [''] * len(fields)
  954. lines.append(current)
  955. # list of primary fields followed by secondary field(s)
  956. primary_done = []
  957. # process column by column
  958. for i, path in enumerate(fields):
  959. if not path:
  960. continue
  961. name = path[0]
  962. if name in primary_done:
  963. continue
  964. if name == '.id':
  965. current[i] = str(record.id)
  966. elif name == 'id':
  967. current[i] = (record._name, record.id)
  968. else:
  969. prop_name = None
  970. if '.' in name:
  971. fname, prop_name = name.split('.')
  972. field = record._fields[fname]
  973. assert field.type == 'properties' and prop_name
  974. value, field_type = get_property(fname, prop_name, record)
  975. else:
  976. field = record._fields[name]
  977. field_type = field.type
  978. value = record[name]
  979. # this part could be simpler, but it has to be done this way
  980. # in order to reproduce the former behavior
  981. if not isinstance(value, BaseModel):
  982. current[i] = field.convert_to_export(value, record)
  983. elif import_compatible and field_type == 'reference':
  984. current[i] = f"{value._name},{value.id}"
  985. else:
  986. primary_done.append(name)
  987. # recursively export the fields that follow name; use
  988. # 'display_name' where no subfield is exported
  989. fields2 = [(p[1:] or ['display_name'] if p and p[0] == name else [])
  990. for p in fields]
  991. # in import_compat mode, m2m should always be exported as
  992. # a comma-separated list of xids or names in a single cell
  993. if import_compatible and field_type == 'many2many':
  994. index = None
  995. # find out which subfield the user wants & its
  996. # location as we might not get it as the first
  997. # column we encounter
  998. for name in ['id', 'name', 'display_name']:
  999. with contextlib.suppress(ValueError):
  1000. index = fields2.index([name])
  1001. break
  1002. if index is None:
  1003. # not found anything, assume we just want the
  1004. # display_name in the first column
  1005. name = None
  1006. index = i
  1007. if name == 'id':
  1008. xml_ids = [xid for _, xid in value.__ensure_xml_id()]
  1009. current[index] = ','.join(xml_ids)
  1010. else:
  1011. current[index] = ','.join(value.mapped('display_name')) if value else ''
  1012. continue
  1013. lines2 = value._export_rows(fields2, _is_toplevel_call=False)
  1014. if lines2:
  1015. # merge first line with record's main line
  1016. for j, val in enumerate(lines2[0]):
  1017. if val or isinstance(val, (int, float)):
  1018. current[j] = val
  1019. # append the other lines at the end
  1020. lines += lines2[1:]
  1021. else:
  1022. current[i] = ''
  1023. # if any xid should be exported, only do so at toplevel
  1024. if _is_toplevel_call and any(f[-1] == 'id' for f in fields):
  1025. bymodels = collections.defaultdict(set)
  1026. xidmap = collections.defaultdict(list)
  1027. # collect all the tuples in "lines" (along with their coordinates)
  1028. for i, line in enumerate(lines):
  1029. for j, cell in enumerate(line):
  1030. if isinstance(cell, tuple):
  1031. bymodels[cell[0]].add(cell[1])
  1032. xidmap[cell].append((i, j))
  1033. # for each model, xid-export everything and inject in matrix
  1034. for model, ids in bymodels.items():
  1035. for record, xid in self.env[model].browse(ids).__ensure_xml_id():
  1036. for i, j in xidmap.pop((record._name, record.id)):
  1037. lines[i][j] = xid
  1038. assert not xidmap, "failed to export xids for %s" % ', '.join('{}:{}' % it for it in xidmap.items())
  1039. return lines
  1040. def export_data(self, fields_to_export):
  1041. """ Export fields for selected objects
  1042. This method is used when exporting data via client menu
  1043. :param list fields_to_export: list of fields
  1044. :returns: dictionary with a *datas* matrix
  1045. :rtype: dict
  1046. """
  1047. if not (self.env.is_admin() or self.env.user.has_group('base.group_allow_export')):
  1048. raise UserError(_("You don't have the rights to export data. Please contact an Administrator."))
  1049. fields_to_export = [fix_import_export_id_paths(f) for f in fields_to_export]
  1050. return {'datas': self._export_rows(fields_to_export)}
  1051. @api.model
  1052. def load(self, fields, data):
  1053. """
  1054. Attempts to load the data matrix, and returns a list of ids (or
  1055. ``False`` if there was an error and no id could be generated) and a
  1056. list of messages.
  1057. The ids are those of the records created and saved (in database), in
  1058. the same order they were extracted from the file. They can be passed
  1059. directly to :meth:`~read`
  1060. :param fields: list of fields to import, at the same index as the corresponding data
  1061. :type fields: list(str)
  1062. :param data: row-major matrix of data to import
  1063. :type data: list(list(str))
  1064. :returns: {ids: list(int)|False, messages: [Message][, lastrow: int]}
  1065. """
  1066. self.env.flush_all()
  1067. # determine values of mode, current_module and noupdate
  1068. mode = self._context.get('mode', 'init')
  1069. current_module = self._context.get('module', '__import__')
  1070. noupdate = self._context.get('noupdate', False)
  1071. # add current module in context for the conversion of xml ids
  1072. self = self.with_context(_import_current_module=current_module)
  1073. cr = self._cr
  1074. sp = cr.savepoint(flush=False)
  1075. fields = [fix_import_export_id_paths(f) for f in fields]
  1076. fg = self.fields_get()
  1077. ids = []
  1078. messages = []
  1079. # list of (xid, vals, info) for records to be created in batch
  1080. batch = []
  1081. batch_xml_ids = set()
  1082. # models in which we may have created / modified data, therefore might
  1083. # require flushing in order to name_search: the root model and any
  1084. # o2m
  1085. creatable_models = {self._name}
  1086. for field_path in fields:
  1087. if field_path[0] in (None, 'id', '.id'):
  1088. continue
  1089. model_fields = self._fields
  1090. for field_name in field_path:
  1091. if field_name in (None, 'id', '.id'):
  1092. break
  1093. if isinstance(model_fields.get(field_name), odoo.fields.One2many):
  1094. comodel = model_fields[field_name].comodel_name
  1095. creatable_models.add(comodel)
  1096. model_fields = self.env[comodel]._fields
  1097. def flush(*, xml_id=None, model=None):
  1098. if not batch:
  1099. return
  1100. assert not (xml_id and model), \
  1101. "flush can specify *either* an external id or a model, not both"
  1102. if xml_id and xml_id not in batch_xml_ids:
  1103. if xml_id not in self.env:
  1104. return
  1105. if model and model not in creatable_models:
  1106. return
  1107. data_list = [
  1108. dict(xml_id=xid, values=vals, info=info, noupdate=noupdate)
  1109. for xid, vals, info in batch
  1110. ]
  1111. batch.clear()
  1112. batch_xml_ids.clear()
  1113. # try to create in batch
  1114. global_error_message = None
  1115. try:
  1116. with cr.savepoint():
  1117. recs = self._load_records(data_list, mode == 'update')
  1118. ids.extend(recs.ids)
  1119. return
  1120. except psycopg2.InternalError as e:
  1121. # broken transaction, exit and hope the source error was already logged
  1122. if not any(message['type'] == 'error' for message in messages):
  1123. info = data_list[0]['info']
  1124. messages.append(dict(info, type='error', message=_(u"Unknown database error: '%s'", e)))
  1125. return
  1126. except UserError as e:
  1127. global_error_message = dict(data_list[0]['info'], type='error', message=str(e))
  1128. except Exception:
  1129. pass
  1130. errors = 0
  1131. # try again, this time record by record
  1132. for i, rec_data in enumerate(data_list, 1):
  1133. try:
  1134. with cr.savepoint():
  1135. rec = self._load_records([rec_data], mode == 'update')
  1136. ids.append(rec.id)
  1137. except psycopg2.Warning as e:
  1138. info = rec_data['info']
  1139. messages.append(dict(info, type='warning', message=str(e)))
  1140. except psycopg2.Error as e:
  1141. info = rec_data['info']
  1142. messages.append(dict(info, type='error', **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
  1143. # Failed to write, log to messages, rollback savepoint (to
  1144. # avoid broken transaction) and keep going
  1145. errors += 1
  1146. except UserError as e:
  1147. info = rec_data['info']
  1148. messages.append(dict(info, type='error', message=str(e)))
  1149. errors += 1
  1150. except Exception as e:
  1151. _logger.debug("Error while loading record", exc_info=True)
  1152. info = rec_data['info']
  1153. message = _('Unknown error during import: %(error_type)s: %(error_message)s', error_type=type(e), error_message=e)
  1154. moreinfo = _('Resolve other errors first')
  1155. messages.append(dict(info, type='error', message=message, moreinfo=moreinfo))
  1156. # Failed for some reason, perhaps due to invalid data supplied,
  1157. # rollback savepoint and keep going
  1158. errors += 1
  1159. if errors >= 10 and (errors >= i / 10):
  1160. messages.append({
  1161. 'type': 'warning',
  1162. 'message': _(u"Found more than 10 errors and more than one error per 10 records, interrupted to avoid showing too many errors.")
  1163. })
  1164. break
  1165. if errors > 0 and global_error_message and global_error_message not in messages:
  1166. # If we cannot create the records 1 by 1, we display the error raised when we created the records simultaneously
  1167. messages.insert(0, global_error_message)
  1168. # make 'flush' available to the methods below, in the case where XMLID
  1169. # resolution fails, for instance
  1170. flush_recordset = self.with_context(import_flush=flush, import_cache=LRU(1024))
  1171. # TODO: break load's API instead of smuggling via context?
  1172. limit = self._context.get('_import_limit')
  1173. if limit is None:
  1174. limit = float('inf')
  1175. extracted = flush_recordset._extract_records(fields, data, log=messages.append, limit=limit)
  1176. converted = flush_recordset._convert_records(extracted, log=messages.append)
  1177. info = {'rows': {'to': -1}}
  1178. for id, xid, record, info in converted:
  1179. if self.env.context.get('import_file') and self.env.context.get('import_skip_records'):
  1180. if any([record.get(field) is None for field in self.env.context['import_skip_records']]):
  1181. continue
  1182. if xid:
  1183. xid = xid if '.' in xid else "%s.%s" % (current_module, xid)
  1184. batch_xml_ids.add(xid)
  1185. elif id:
  1186. record['id'] = id
  1187. batch.append((xid, record, info))
  1188. flush()
  1189. if any(message['type'] == 'error' for message in messages):
  1190. sp.rollback()
  1191. ids = False
  1192. # cancel all changes done to the registry/ormcache
  1193. self.pool.reset_changes()
  1194. sp.close(rollback=False)
  1195. nextrow = info['rows']['to'] + 1
  1196. if nextrow < limit:
  1197. nextrow = 0
  1198. return {
  1199. 'ids': ids,
  1200. 'messages': messages,
  1201. 'nextrow': nextrow,
  1202. }
  1203. def _extract_records(self, field_paths, data, log=lambda a: None, limit=float('inf')):
  1204. """ Generates record dicts from the data sequence.
  1205. The result is a generator of dicts mapping field names to raw
  1206. (unconverted, unvalidated) values.
  1207. For relational fields, if sub-fields were provided the value will be
  1208. a list of sub-records
  1209. The following sub-fields may be set on the record (by key):
  1210. * None is the display_name for the record (to use with name_create/name_search)
  1211. * "id" is the External ID for the record
  1212. * ".id" is the Database ID for the record
  1213. """
  1214. fields = self._fields
  1215. get_o2m_values = itemgetter_tuple([
  1216. index
  1217. for index, fnames in enumerate(field_paths)
  1218. if fnames[0] in fields and fields[fnames[0]].type == 'one2many'
  1219. ])
  1220. get_nono2m_values = itemgetter_tuple([
  1221. index
  1222. for index, fnames in enumerate(field_paths)
  1223. if fnames[0] not in fields or fields[fnames[0]].type != 'one2many'
  1224. ])
  1225. # Checks if the provided row has any non-empty one2many fields
  1226. def only_o2m_values(row):
  1227. return any(get_o2m_values(row)) and not any(get_nono2m_values(row))
  1228. property_definitions = {}
  1229. property_columns = defaultdict(list)
  1230. for fname, *__ in field_paths:
  1231. if not fname:
  1232. continue
  1233. if '.' not in fname:
  1234. if fname not in fields:
  1235. raise ValueError(f'Invalid field name {fname!r}')
  1236. continue
  1237. f_prop_name, property_name = fname.split('.')
  1238. if f_prop_name not in fields or fields[f_prop_name].type != 'properties':
  1239. # Can be .id
  1240. continue
  1241. definition = self.get_property_definition(fname)
  1242. if not definition:
  1243. # Can happen if someone remove the property, UserError ?
  1244. raise ValueError(f"Property {property_name!r} doesn't have any definition on {fname!r} field")
  1245. property_definitions[fname] = definition
  1246. property_columns[f_prop_name].append(fname)
  1247. # m2o fields can't be on multiple lines so don't take it in account
  1248. # for only_o2m_values rows filter, but special-case it later on to
  1249. # be handled with relational fields (as it can have subfields).
  1250. def is_relational(fname):
  1251. return (
  1252. fname in fields and
  1253. fields[fname].relational
  1254. ) or (
  1255. fname in property_definitions and
  1256. property_definitions[fname].get('type') in ('many2one', 'many2many')
  1257. )
  1258. index = 0
  1259. while index < len(data) and index < limit:
  1260. row = data[index]
  1261. # copy non-relational fields to record dict
  1262. record = {
  1263. fnames[0]: value
  1264. for fnames, value in zip(field_paths, row)
  1265. if not is_relational(fnames[0])
  1266. }
  1267. # Get all following rows which have relational values attached to
  1268. # the current record (no non-relational values)
  1269. record_span = itertools.takewhile(
  1270. only_o2m_values, itertools.islice(data, index + 1, None))
  1271. # stitch record row back on for relational fields
  1272. record_span = list(itertools.chain([row], record_span))
  1273. for relfield, *__ in field_paths:
  1274. if not is_relational(relfield):
  1275. continue
  1276. if relfield not in property_definitions:
  1277. comodel = self.env[fields[relfield].comodel_name]
  1278. else:
  1279. comodel = self.env[property_definitions[relfield]['comodel']]
  1280. # get only cells for this sub-field, should be strictly
  1281. # non-empty, field path [None] is for display_name field
  1282. indices, subfields = zip(*((index, fnames[1:] or [None])
  1283. for index, fnames in enumerate(field_paths)
  1284. if fnames[0] == relfield))
  1285. # return all rows which have at least one value for the
  1286. # subfields of relfield
  1287. relfield_data = [it for it in map(itemgetter_tuple(indices), record_span) if any(it)]
  1288. record[relfield] = [
  1289. subrecord
  1290. for subrecord, _subinfo in comodel._extract_records(subfields, relfield_data, log=log)
  1291. ]
  1292. for properties_fname, property_indexes_names in property_columns.items():
  1293. properties = []
  1294. for property_name in property_indexes_names:
  1295. value = record.pop(property_name)
  1296. properties.append(dict(**property_definitions[property_name], value=value))
  1297. record[properties_fname] = properties
  1298. yield record, {'rows': {
  1299. 'from': index,
  1300. 'to': index + len(record_span) - 1,
  1301. }}
  1302. index += len(record_span)
  1303. @api.model
  1304. def _convert_records(self, records, log=lambda a: None):
  1305. """ Converts records from the source iterable (recursive dicts of
  1306. strings) into forms which can be written to the database (via
  1307. ``self.create`` or ``(ir.model.data)._update``)
  1308. :returns: a list of triplets of (id, xid, record)
  1309. :rtype: list[(int|None, str|None, dict)]
  1310. """
  1311. field_names = {name: field.string for name, field in self._fields.items()}
  1312. if self.env.lang:
  1313. field_names.update(self.env['ir.model.fields'].get_field_string(self._name))
  1314. convert = self.env['ir.fields.converter'].for_model(self)
  1315. def _log(base, record, field, exception):
  1316. type = 'warning' if isinstance(exception, Warning) else 'error'
  1317. # logs the logical (not human-readable) field name for automated
  1318. # processing of response, but injects human readable in message
  1319. field_name = field_names[field]
  1320. exc_vals = dict(base, record=record, field=field_name)
  1321. record = dict(base, type=type, record=record, field=field,
  1322. message=str(exception.args[0]) % exc_vals)
  1323. if len(exception.args) > 1:
  1324. info = {}
  1325. if exception.args[1] and isinstance(exception.args[1], dict):
  1326. info = exception.args[1]
  1327. # ensure field_name is added to the exception. Used in import to
  1328. # concatenate multiple errors in the same block
  1329. info['field_name'] = field_name
  1330. record.update(info)
  1331. log(record)
  1332. for stream_index, (record, extras) in enumerate(records):
  1333. # xid
  1334. xid = record.get('id', False)
  1335. # dbid
  1336. dbid = False
  1337. if record.get('.id'):
  1338. try:
  1339. dbid = int(record['.id'])
  1340. except ValueError:
  1341. # in case of overridden id column
  1342. dbid = record['.id']
  1343. if not self.search([('id', '=', dbid)]):
  1344. log(dict(extras,
  1345. type='error',
  1346. record=stream_index,
  1347. field='.id',
  1348. message=_(u"Unknown database identifier '%s'", dbid)))
  1349. dbid = False
  1350. converted = convert(record, functools.partial(_log, extras, stream_index))
  1351. yield dbid, xid, converted, dict(extras, record=stream_index)
  1352. def _validate_fields(self, field_names, excluded_names=()):
  1353. """ Invoke the constraint methods for which at least one field name is
  1354. in ``field_names`` and none is in ``excluded_names``.
  1355. """
  1356. field_names = set(field_names)
  1357. excluded_names = set(excluded_names)
  1358. for check in self._constraint_methods:
  1359. if (not field_names.isdisjoint(check._constrains)
  1360. and excluded_names.isdisjoint(check._constrains)):
  1361. check(self)
  1362. @api.model
  1363. def default_get(self, fields_list):
  1364. """ default_get(fields_list) -> default_values
  1365. Return default values for the fields in ``fields_list``. Default
  1366. values are determined by the context, user defaults, user fallbacks
  1367. and the model itself.
  1368. :param list fields_list: names of field whose default is requested
  1369. :return: a dictionary mapping field names to their corresponding default values,
  1370. if they have a default value.
  1371. :rtype: dict
  1372. .. note::
  1373. Unrequested defaults won't be considered, there is no need to return a
  1374. value for fields whose names are not in `fields_list`.
  1375. """
  1376. defaults = {}
  1377. parent_fields = defaultdict(list)
  1378. ir_defaults = self.env['ir.default']._get_model_defaults(self._name)
  1379. for name in fields_list:
  1380. # 1. look up context
  1381. key = 'default_' + name
  1382. if key in self._context:
  1383. defaults[name] = self._context[key]
  1384. continue
  1385. field = self._fields.get(name)
  1386. if not field:
  1387. continue
  1388. # 2. look up default for non-company_dependent fields
  1389. if not field.company_dependent and name in ir_defaults:
  1390. defaults[name] = ir_defaults[name]
  1391. continue
  1392. # 3. look up field.default
  1393. if field.default:
  1394. defaults[name] = field.default(self)
  1395. continue
  1396. # 4. look up fallback for company_dependent fields
  1397. if field.company_dependent and name in ir_defaults:
  1398. defaults[name] = ir_defaults[name]
  1399. continue
  1400. # 5. delegate to parent model
  1401. if field.inherited:
  1402. field = field.related_field
  1403. parent_fields[field.model_name].append(field.name)
  1404. # convert default values to the right format
  1405. #
  1406. # we explicitly avoid using _convert_to_write() for x2many fields,
  1407. # because the latter leaves values like [(Command.LINK, 2),
  1408. # (Command.LINK, 3)], which are not supported by the web client as
  1409. # default values; stepping through the cache allows to normalize
  1410. # such a list to [(Command.SET, 0, [2, 3])], which is properly
  1411. # supported by the web client
  1412. for fname, value in defaults.items():
  1413. if fname in self._fields:
  1414. field = self._fields[fname]
  1415. value = field.convert_to_cache(value, self, validate=False)
  1416. defaults[fname] = field.convert_to_write(value, self)
  1417. # add default values for inherited fields
  1418. for model, names in parent_fields.items():
  1419. defaults.update(self.env[model].default_get(names))
  1420. return defaults
  1421. @api.model
  1422. def _rec_name_fallback(self):
  1423. # if self._rec_name is set, it belongs to self._fields
  1424. return self._rec_name or 'id'
  1425. @api.model
  1426. @api.readonly
  1427. def search_count(self, domain, limit=None):
  1428. """ search_count(domain[, limit=None]) -> int
  1429. Returns the number of records in the current model matching :ref:`the
  1430. provided domain <reference/orm/domains>`.
  1431. :param domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
  1432. list to match all records.
  1433. :param limit: maximum number of record to count (upperbound) (default: all)
  1434. This is a high-level method, which should not be overridden. Its actual
  1435. implementation is done by method :meth:`_search`.
  1436. """
  1437. query = self._search(domain, limit=limit)
  1438. return len(query)
  1439. @api.model
  1440. @api.readonly
  1441. @api.returns('self')
  1442. def search(self, domain, offset=0, limit=None, order=None) -> Self:
  1443. """ search(domain[, offset=0][, limit=None][, order=None])
  1444. Search for the records that satisfy the given ``domain``
  1445. :ref:`search domain <reference/orm/domains>`.
  1446. :param domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
  1447. list to match all records.
  1448. :param int offset: number of results to ignore (default: none)
  1449. :param int limit: maximum number of records to return (default: all)
  1450. :param str order: sort string
  1451. :returns: at most ``limit`` records matching the search criteria
  1452. :raise AccessError: if user is not allowed to access requested information
  1453. This is a high-level method, which should not be overridden. Its actual
  1454. implementation is done by method :meth:`_search`.
  1455. """
  1456. return self.search_fetch(domain, [], offset=offset, limit=limit, order=order)
  1457. @api.model
  1458. @api.readonly
  1459. @api.returns('self')
  1460. def search_fetch(self, domain, field_names, offset=0, limit=None, order=None):
  1461. """ search_fetch(domain, field_names[, offset=0][, limit=None][, order=None])
  1462. Search for the records that satisfy the given ``domain``
  1463. :ref:`search domain <reference/orm/domains>`, and fetch the given fields
  1464. to the cache. This method is like a combination of methods :meth:`search`
  1465. and :meth:`fetch`, but it performs both tasks with a minimal number of
  1466. SQL queries.
  1467. :param domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
  1468. list to match all records.
  1469. :param field_names: a collection of field names to fetch
  1470. :param int offset: number of results to ignore (default: none)
  1471. :param int limit: maximum number of records to return (default: all)
  1472. :param str order: sort string
  1473. :returns: at most ``limit`` records matching the search criteria
  1474. :raise AccessError: if user is not allowed to access requested information
  1475. """
  1476. # first determine a query that satisfies the domain and access rules
  1477. query = self._search(domain, offset=offset, limit=limit, order=order or self._order)
  1478. if query.is_empty():
  1479. # optimization: don't execute the query at all
  1480. return self.browse()
  1481. fields_to_fetch = self._determine_fields_to_fetch(field_names)
  1482. return self._fetch_query(query, fields_to_fetch)
  1483. #
  1484. # display_name, name_create, name_search
  1485. #
  1486. @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
  1487. def _compute_display_name(self):
  1488. """Compute the value of the `display_name` field.
  1489. The `display_name` field is a textual representation of the record.
  1490. This method can be overridden to change the representation. If needed,
  1491. it can be made field-dependent using :attr:`~odoo.api.depends` and
  1492. context-dependent using :attr:`~odoo.api.depends_context`.
  1493. """
  1494. if self._rec_name:
  1495. convert = self._fields[self._rec_name].convert_to_display_name
  1496. for record in self:
  1497. record.display_name = convert(record[self._rec_name], record)
  1498. else:
  1499. for record in self:
  1500. record.display_name = f"{record._name},{record.id}"
  1501. @api.model
  1502. def _search_display_name(self, operator, value):
  1503. """
  1504. Returns a domain that matches records whose display name matches the
  1505. given ``name`` pattern when compared with the given ``operator``.
  1506. This method is used to implement the search on the ``display_name``
  1507. field, and can be overridden to change the search criteria.
  1508. The default implementation searches the fields defined in `_rec_names_search`
  1509. or `_rec_name`.
  1510. """
  1511. search_fnames = self._rec_names_search or ([self._rec_name] if self._rec_name else [])
  1512. if not search_fnames:
  1513. _logger.warning("Cannot search on display_name, no _rec_name or _rec_names_search defined on %s", self._name)
  1514. # do not restrain anything
  1515. return expression.TRUE_DOMAIN
  1516. if operator.endswith('like') and not value and '=' not in operator:
  1517. # optimize out the default criterion of ``like ''`` that matches everything
  1518. # return all when operator is positive
  1519. return expression.FALSE_DOMAIN if operator in expression.NEGATIVE_TERM_OPERATORS else expression.TRUE_DOMAIN
  1520. aggregator = expression.AND if operator in expression.NEGATIVE_TERM_OPERATORS else expression.OR
  1521. domains = []
  1522. for field_name in search_fnames:
  1523. # field_name may be a sequence of field names (partner_id.name)
  1524. # retrieve the last field in the sequence
  1525. model = self
  1526. for fname in field_name.split('.'):
  1527. field = model._fields[fname]
  1528. model = self.env.get(field.comodel_name)
  1529. if field.relational:
  1530. # relational fields will trigger a _name_search on their comodel
  1531. domains.append([(field_name, operator, value)])
  1532. continue
  1533. try:
  1534. domains.append([(field_name, operator, field.convert_to_write(value, self))])
  1535. except ValueError:
  1536. pass # ignore that case if the value doesn't match the field type
  1537. return aggregator(domains)
  1538. @api.model
  1539. def name_create(self, name) -> tuple[int, str] | typing.Literal[False]:
  1540. """ name_create(name) -> record
  1541. Create a new record by calling :meth:`~.create` with only one value
  1542. provided: the display name of the new record.
  1543. The new record will be initialized with any default values
  1544. applicable to this model, or provided through the context. The usual
  1545. behavior of :meth:`~.create` applies.
  1546. :param name: display name of the record to create
  1547. :rtype: tuple
  1548. :return: the (id, display_name) pair value of the created record
  1549. """
  1550. if self._rec_name:
  1551. record = self.create({self._rec_name: name})
  1552. return record.id, record.display_name
  1553. else:
  1554. _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
  1555. return False
  1556. @api.model
  1557. @api.readonly
  1558. def name_search(self, name='', args=None, operator='ilike', limit=100) -> list[tuple[int, str]]:
  1559. """ name_search(name='', args=None, operator='ilike', limit=100)
  1560. Search for records that have a display name matching the given
  1561. ``name`` pattern when compared with the given ``operator``, while also
  1562. matching the optional search domain (``args``).
  1563. This is used for example to provide suggestions based on a partial
  1564. value for a relational field. Should usually behave as the reverse of
  1565. ``display_name``, but that is not guaranteed.
  1566. This method is equivalent to calling :meth:`~.search` with a search
  1567. domain based on ``display_name`` and mapping id and display_name on
  1568. the resulting search.
  1569. :param str name: the name pattern to match
  1570. :param list args: optional search domain (see :meth:`~.search` for
  1571. syntax), specifying further restrictions
  1572. :param str operator: domain operator for matching ``name``, such as
  1573. ``'like'`` or ``'='``.
  1574. :param int limit: optional max number of records to return
  1575. :rtype: list
  1576. :return: list of pairs ``(id, display_name)`` for all matching records.
  1577. """
  1578. domain = expression.AND([[('display_name', operator, name)], args or []])
  1579. records = self.search_fetch(domain, ['display_name'], limit=limit)
  1580. return [(record.id, record.display_name) for record in records.sudo()]
  1581. @api.model
  1582. def _add_missing_default_values(self, values):
  1583. # avoid overriding inherited values when parent is set
  1584. avoid_models = set()
  1585. def collect_models_to_avoid(model):
  1586. for parent_mname, parent_fname in model._inherits.items():
  1587. if parent_fname in values:
  1588. avoid_models.add(parent_mname)
  1589. else:
  1590. # manage the case where an ancestor parent field is set
  1591. collect_models_to_avoid(self.env[parent_mname])
  1592. collect_models_to_avoid(self)
  1593. def avoid(field):
  1594. # check whether the field is inherited from one of avoid_models
  1595. if avoid_models:
  1596. while field.inherited:
  1597. field = field.related_field
  1598. if field.model_name in avoid_models:
  1599. return True
  1600. return False
  1601. # compute missing fields
  1602. missing_defaults = [
  1603. name
  1604. for name, field in self._fields.items()
  1605. if name not in values
  1606. if not avoid(field)
  1607. ]
  1608. if missing_defaults:
  1609. # override defaults with the provided values, never allow the other way around
  1610. defaults = self.default_get(missing_defaults)
  1611. for name, value in defaults.items():
  1612. if self._fields[name].type == 'many2many' and value and isinstance(value[0], int):
  1613. # convert a list of ids into a list of commands
  1614. defaults[name] = [Command.set(value)]
  1615. elif self._fields[name].type == 'one2many' and value and isinstance(value[0], dict):
  1616. # convert a list of dicts into a list of commands
  1617. defaults[name] = [Command.create(x) for x in value]
  1618. defaults.update(values)
  1619. else:
  1620. defaults = values
  1621. # delegate the default properties to the properties field
  1622. for field in self._fields.values():
  1623. if field.type == 'properties':
  1624. defaults[field.name] = field._add_default_values(self.env, defaults)
  1625. return defaults
  1626. @classmethod
  1627. def clear_caches(cls):
  1628. """ Clear the caches
  1629. This clears the caches associated to methods decorated with
  1630. ``tools.ormcache``.
  1631. """
  1632. warnings.warn("Deprecated model.clear_cache(), use registry.clear_cache() instead", DeprecationWarning)
  1633. cls.pool.clear_all_caches()
  1634. @api.model
  1635. def _read_group(self, domain, groupby=(), aggregates=(), having=(), offset=0, limit=None, order=None):
  1636. """ Get fields aggregations specified by ``aggregates`` grouped by the given ``groupby``
  1637. fields where record are filtered by the ``domain``.
  1638. :param list domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
  1639. list to match all records.
  1640. :param list groupby: list of groupby descriptions by which the records will be grouped.
  1641. A groupby description is either a field (then it will be grouped by that field)
  1642. or a string `'field:granularity'`. Right now, the only supported granularities
  1643. are `'day'`, `'week'`, `'month'`, `'quarter'` or `'year'`, and they only make sense for
  1644. date/datetime fields.
  1645. :param list aggregates: list of aggregates specification.
  1646. Each element is `'field:agg'` (aggregate field with aggregation function `'agg'`).
  1647. The possible aggregation functions are the ones provided by
  1648. `PostgreSQL <https://www.postgresql.org/docs/current/static/functions-aggregate.html>`_,
  1649. `'count_distinct'` with the expected meaning and `'recordset'` to act like `'array_agg'`
  1650. converted into a recordset.
  1651. :param list having: A domain where the valid "fields" are the aggregates.
  1652. :param int offset: optional number of groups to skip
  1653. :param int limit: optional max number of groups to return
  1654. :param str order: optional ``order by`` specification, for
  1655. overriding the natural sort ordering of the groups,
  1656. see also :meth:`~.search`.
  1657. :return: list of tuple containing in the order the groups values and aggregates values (flatten):
  1658. `[(groupby_1_value, ... , aggregate_1_value_aggregate, ...), ...]`.
  1659. If group is related field, the value of it will be a recordset (with a correct prefetch set).
  1660. :rtype: list
  1661. :raise AccessError: if user is not allowed to access requested information
  1662. """
  1663. self.browse().check_access('read')
  1664. if expression.is_false(self, domain):
  1665. if not groupby:
  1666. # when there is no group, postgresql always return a row
  1667. return [tuple(
  1668. self._read_group_empty_value(spec)
  1669. for spec in itertools.chain(groupby, aggregates)
  1670. )]
  1671. return []
  1672. query = self._search(domain)
  1673. query.limit = limit
  1674. query.offset = offset
  1675. groupby_terms: dict[str, SQL] = {
  1676. spec: self._read_group_groupby(spec, query)
  1677. for spec in groupby
  1678. }
  1679. if groupby_terms:
  1680. query.groupby = SQL(", ").join(groupby_terms.values())
  1681. query.having = self._read_group_having(having, query)
  1682. # _read_group_orderby may possibly extend query.groupby for orderby
  1683. query.order = self._read_group_orderby(order, groupby_terms, query)
  1684. select_terms: list[SQL] = [
  1685. self._read_group_select(spec, query)
  1686. for spec in aggregates
  1687. ]
  1688. # row_values: [(a1, b1, c1), (a2, b2, c2), ...]
  1689. row_values = self.env.execute_query(query.select(*groupby_terms.values(), *select_terms))
  1690. if not row_values:
  1691. return row_values
  1692. # post-process values column by column
  1693. column_iterator = zip(*row_values)
  1694. # column_result: [(a1, a2, ...), (b1, b2, ...), (c1, c2, ...)]
  1695. column_result = []
  1696. for spec in groupby:
  1697. column = self._read_group_postprocess_groupby(spec, next(column_iterator))
  1698. column_result.append(column)
  1699. for spec in aggregates:
  1700. column = self._read_group_postprocess_aggregate(spec, next(column_iterator))
  1701. column_result.append(column)
  1702. assert next(column_iterator, None) is None
  1703. # return [(a1, b1, c1), (a2, b2, c2), ...]
  1704. return list(zip(*column_result))
  1705. def _read_group_select(self, aggregate_spec: str, query: Query) -> SQL:
  1706. """ Return <SQL expression> corresponding to the given aggregation.
  1707. The method also checks whether the fields used in the aggregate are
  1708. accessible for reading.
  1709. """
  1710. if aggregate_spec == '__count':
  1711. return SQL("COUNT(*)")
  1712. fname, property_name, func = parse_read_group_spec(aggregate_spec)
  1713. if property_name:
  1714. raise ValueError(f"Invalid {aggregate_spec!r}, this dot notation is not supported")
  1715. if fname not in self:
  1716. raise ValueError(f"Invalid field {fname!r} on model {self._name!r} for {aggregate_spec!r}.")
  1717. if not func:
  1718. raise ValueError(f"Aggregate method is mandatory for {fname!r}")
  1719. if func not in READ_GROUP_AGGREGATE:
  1720. raise ValueError(f"Invalid aggregate method {func!r} for {aggregate_spec!r}.")
  1721. field = self._fields[fname]
  1722. if func == 'recordset' and not (field.relational or fname == 'id'):
  1723. raise ValueError(f"Aggregate method {func!r} can be only used on relational field (or id) (for {aggregate_spec!r}).")
  1724. sql_field = self._field_to_sql(self._table, fname, query)
  1725. return READ_GROUP_AGGREGATE[func](self._table, sql_field)
  1726. def _read_group_groupby(self, groupby_spec: str, query: Query) -> SQL:
  1727. """ Return <SQL expression> corresponding to the given groupby element.
  1728. The method also checks whether the fields used in the groupby are
  1729. accessible for reading.
  1730. """
  1731. fname, property_name, granularity = parse_read_group_spec(groupby_spec)
  1732. if fname not in self:
  1733. raise ValueError(f"Invalid field {fname!r} on model {self._name!r}")
  1734. field = self._fields[fname]
  1735. if field.type == 'properties':
  1736. sql_expr = self._read_group_groupby_properties(fname, property_name, query)
  1737. elif property_name:
  1738. raise ValueError(f"Property access on non-property field: {groupby_spec!r}")
  1739. elif granularity and field.type not in ('datetime', 'date', 'properties'):
  1740. raise ValueError(f"Granularity set on a no-datetime field or property: {groupby_spec!r}")
  1741. elif field.type == 'many2many':
  1742. alias = self._table
  1743. if field.related and not field.store:
  1744. __, field, alias = self._traverse_related_sql(alias, field, query)
  1745. if not field.store:
  1746. raise ValueError(f"Group by non-stored many2many field: {groupby_spec!r}")
  1747. # special case for many2many fields: prepare a query on the comodel
  1748. # in order to reuse the mechanism _apply_ir_rules, then inject the
  1749. # query as an extra condition of the left join
  1750. comodel = self.env[field.comodel_name]
  1751. coquery = comodel._where_calc([], active_test=False)
  1752. comodel._apply_ir_rules(coquery)
  1753. # LEFT JOIN {field.relation} AS rel_alias ON
  1754. # alias.id = rel_alias.{field.column1}
  1755. # AND rel_alias.{field.column2} IN ({coquery})
  1756. rel_alias = query.make_alias(alias, field.name)
  1757. condition = SQL(
  1758. "%s = %s",
  1759. SQL.identifier(alias, 'id'),
  1760. SQL.identifier(rel_alias, field.column1),
  1761. )
  1762. if coquery.where_clause:
  1763. condition = SQL(
  1764. "%s AND %s IN %s",
  1765. condition,
  1766. SQL.identifier(rel_alias, field.column2),
  1767. coquery.subselect(),
  1768. )
  1769. query.add_join("LEFT JOIN", rel_alias, field.relation, condition)
  1770. return SQL.identifier(rel_alias, field.column2)
  1771. else:
  1772. sql_expr = self._field_to_sql(self._table, fname, query)
  1773. if field.type == 'datetime' and (tz := self.env.context.get('tz')):
  1774. if tz in pytz.all_timezones_set:
  1775. sql_expr = SQL("timezone(%s, timezone('UTC', %s))", self.env.context['tz'], sql_expr)
  1776. else:
  1777. _logger.warning("Grouping in unknown / legacy timezone %r", tz)
  1778. if field.type in ('datetime', 'date') or (field.type == 'properties' and granularity):
  1779. if not granularity:
  1780. raise ValueError(f"Granularity not set on a date(time) field: {groupby_spec!r}")
  1781. if granularity not in READ_GROUP_ALL_TIME_GRANULARITY:
  1782. raise ValueError(f"Granularity specification isn't correct: {granularity!r}")
  1783. if granularity == 'week':
  1784. # first_week_day: 0=Monday, 1=Tuesday, ...
  1785. first_week_day = int(get_lang(self.env).week_start) - 1
  1786. days_offset = first_week_day and 7 - first_week_day
  1787. interval = f"-{days_offset} DAY"
  1788. sql_expr = SQL(
  1789. "(date_trunc('week', %s::timestamp - INTERVAL %s) + INTERVAL %s)",
  1790. sql_expr, interval, interval,
  1791. )
  1792. elif spec := READ_GROUP_NUMBER_GRANULARITY.get(granularity):
  1793. if granularity == 'day_of_week':
  1794. """
  1795. formula: ((7 - first_day_of_week_in_odoo) + result_from_SQL) % --> 0 based first day of week
  1796. week start on
  1797. monday sunday sat
  1798. 1 | 7 | 6 <-- first day of week in odoo
  1799. SQL | -----------------------
  1800. Monday 1 | 0 | 1 | 2
  1801. tuesday 2 | 1 | 2 | 3
  1802. wed 3 | 2 | 3 | 4
  1803. thurs 4 | 3 | 4 | 5
  1804. friday 5 | 4 | 5 | 6
  1805. sat 6 | 5 | 6 | 0
  1806. sun 7 | 6 | 0 | 1
  1807. """
  1808. first_week_day = int(get_lang(self.env, self.env.context.get('tz')).week_start)
  1809. sql_expr = SQL("mod(7 - %s + date_part(%s, %s)::int, 7)", first_week_day, spec, sql_expr)
  1810. else:
  1811. sql_expr = SQL("date_part(%s, %s)::int", spec, sql_expr)
  1812. else:
  1813. sql_expr = SQL("date_trunc(%s, %s::timestamp)", granularity, sql_expr)
  1814. # If the granularity is a part number, the result is a number (double) so no conversion is needed
  1815. if field.type == 'date' and granularity not in READ_GROUP_NUMBER_GRANULARITY:
  1816. # If the granularity uses date_trunc, we need to convert the timestamp back to a date.
  1817. sql_expr = SQL("%s::date", sql_expr)
  1818. elif field.type == 'boolean':
  1819. sql_expr = SQL("COALESCE(%s, FALSE)", sql_expr)
  1820. return sql_expr
  1821. def _read_group_having(self, having_domain: list, query: Query) -> SQL:
  1822. """ Return <SQL expression> corresponding to the having domain.
  1823. """
  1824. if not having_domain:
  1825. return SQL()
  1826. stack: list[SQL] = []
  1827. SUPPORTED = ('in', 'not in', '<', '>', '<=', '>=', '=', '!=')
  1828. for item in reversed(having_domain):
  1829. if item == '!':
  1830. stack.append(SQL("(NOT %s)", stack.pop()))
  1831. elif item == '&':
  1832. stack.append(SQL("(%s AND %s)", stack.pop(), stack.pop()))
  1833. elif item == '|':
  1834. stack.append(SQL("(%s OR %s)", stack.pop(), stack.pop()))
  1835. elif isinstance(item, (list, tuple)) and len(item) == 3:
  1836. left, operator, right = item
  1837. if operator not in SUPPORTED:
  1838. raise ValueError(f"Invalid having clause {item!r}: supported comparators are {SUPPORTED}")
  1839. sql_left = self._read_group_select(left, query)
  1840. sql_operator = expression.SQL_OPERATORS[operator]
  1841. stack.append(SQL("%s %s %s", sql_left, sql_operator, right))
  1842. else:
  1843. raise ValueError(f"Invalid having clause {item!r}: it should be a domain-like clause")
  1844. while len(stack) > 1:
  1845. stack.append(SQL("(%s AND %s)", stack.pop(), stack.pop()))
  1846. return stack[0]
  1847. def _read_group_orderby(self, order: str, groupby_terms: dict[str, SQL],
  1848. query: Query) -> SQL:
  1849. """ Return (<SQL expression>, <SQL expression>)
  1850. corresponding to the given order and groupby terms.
  1851. :param order: the order specification
  1852. :param groupby_terms: the group by terms mapping ({spec: sql_expression})
  1853. :param query: The query we are building
  1854. """
  1855. if order:
  1856. traverse_many2one = True
  1857. else:
  1858. order = ','.join(groupby_terms)
  1859. traverse_many2one = False
  1860. if not order:
  1861. return SQL()
  1862. orderby_terms = []
  1863. for order_part in order.split(','):
  1864. order_match = regex_order.match(order_part)
  1865. if not order_match:
  1866. raise ValueError(f"Invalid order {order!r} for _read_group()")
  1867. term = order_match['term']
  1868. direction = (order_match['direction'] or 'ASC').upper()
  1869. nulls = (order_match['nulls'] or '').upper()
  1870. sql_direction = SQL(direction) if direction in ('ASC', 'DESC') else SQL()
  1871. sql_nulls = SQL(nulls) if nulls in ('NULLS FIRST', 'NULLS LAST') else SQL()
  1872. if term not in groupby_terms:
  1873. try:
  1874. sql_expr = self._read_group_select(term, query)
  1875. except ValueError as e:
  1876. raise ValueError(f"Order term {order_part!r} is not a valid aggregate nor valid groupby") from e
  1877. orderby_terms.append(SQL("%s %s %s", sql_expr, sql_direction, sql_nulls))
  1878. continue
  1879. field = self._fields.get(term)
  1880. if (
  1881. traverse_many2one and field and field.type == 'many2one'
  1882. and self.env[field.comodel_name]._order != 'id'
  1883. ):
  1884. if sql_order := self._order_to_sql(f'{term} {direction} {nulls}', query):
  1885. orderby_terms.append(sql_order)
  1886. else:
  1887. sql_expr = groupby_terms[term]
  1888. orderby_terms.append(SQL("%s %s %s", sql_expr, sql_direction, sql_nulls))
  1889. return SQL(", ").join(orderby_terms)
  1890. @api.model
  1891. def _read_group_empty_value(self, spec):
  1892. """ Return the empty value corresponding to the given groupby spec or aggregate spec. """
  1893. if spec == '__count':
  1894. return 0
  1895. fname, __, func = parse_read_group_spec(spec) # func is either None, granularity or an aggregate
  1896. if func in ('count', 'count_distinct'):
  1897. return 0
  1898. if func == 'array_agg':
  1899. return []
  1900. field = self._fields[fname]
  1901. if (not func or func == 'recordset') and (field.relational or fname == 'id'):
  1902. return self.env[field.comodel_name] if field.relational else self.env[self._name]
  1903. return False
  1904. def _read_group_postprocess_groupby(self, groupby_spec, raw_values):
  1905. """ Convert the given values of ``groupby_spec``
  1906. from PostgreSQL to the format returned by method ``_read_group()``.
  1907. The formatting rules can be summarized as:
  1908. - groupby values of relational fields are converted to recordsets with a correct prefetch set;
  1909. - NULL values are converted to empty values corresponding to the given aggregate.
  1910. """
  1911. empty_value = self._read_group_empty_value(groupby_spec)
  1912. fname, *__ = parse_read_group_spec(groupby_spec)
  1913. field = self._fields[fname]
  1914. if field.relational or fname == 'id':
  1915. Model = self.pool[field.comodel_name] if field.relational else self.pool[self._name]
  1916. prefetch_ids = tuple(raw_value for raw_value in raw_values if raw_value)
  1917. def recordset(value):
  1918. return Model(self.env, (value,), prefetch_ids) if value else empty_value
  1919. return (recordset(value) for value in raw_values)
  1920. return ((value if value is not None else empty_value) for value in raw_values)
  1921. def _read_group_postprocess_aggregate(self, aggregate_spec, raw_values):
  1922. """ Convert the given values of ``aggregate_spec``
  1923. from PostgreSQL to the format returned by method ``_read_group()``.
  1924. The formatting rules can be summarized as:
  1925. - 'recordset' aggregates are turned into recordsets with a correct prefetch set;
  1926. - NULL values are converted to empty values corresponding to the given aggregate.
  1927. """
  1928. empty_value = self._read_group_empty_value(aggregate_spec)
  1929. if aggregate_spec == '__count':
  1930. return ((value if value is not None else empty_value) for value in raw_values)
  1931. fname, __, func = parse_read_group_spec(aggregate_spec)
  1932. if func == 'recordset':
  1933. field = self._fields[fname]
  1934. Model = self.pool[field.comodel_name] if field.relational else self.pool[self._name]
  1935. prefetch_ids = tuple(unique(
  1936. id_
  1937. for array_values in raw_values if array_values
  1938. for id_ in array_values if id_
  1939. ))
  1940. def recordset(value):
  1941. if not value:
  1942. return empty_value
  1943. ids = tuple(unique(id_ for id_ in value if id_))
  1944. return Model(self.env, ids, prefetch_ids)
  1945. return (recordset(value) for value in raw_values)
  1946. return ((value if value is not None else empty_value) for value in raw_values)
  1947. @api.model
  1948. def _read_group_expand_full(self, groups, domain):
  1949. """Extend the group to include all target records by default."""
  1950. return groups.search([])
  1951. @api.model
  1952. def _read_group_fill_results(self, domain, groupby, annoted_aggregates, read_group_result, read_group_order=None):
  1953. """Helper method for filling in empty groups for all possible values of
  1954. the field being grouped by"""
  1955. field_name = groupby.split('.')[0].split(':')[0]
  1956. field = self._fields[field_name]
  1957. if not field or not field.group_expand:
  1958. return read_group_result
  1959. # field.group_expand is a callable or the name of a method, that returns
  1960. # the groups that we want to display for this field, in the form of a
  1961. # recordset or a list of values (depending on the type of the field).
  1962. # This is useful to implement kanban views for instance, where some
  1963. # columns should be displayed even if they don't contain any record.
  1964. group_expand = field.group_expand
  1965. if isinstance(group_expand, str):
  1966. group_expand = getattr(self.env.registry[self._name], group_expand)
  1967. assert callable(group_expand)
  1968. # determine all groups that should be returned
  1969. values = [line[groupby] for line in read_group_result if line[groupby]]
  1970. if field.relational:
  1971. # groups is a recordset; determine order on groups's model
  1972. groups = self.env[field.comodel_name].browse([value.id for value in values])
  1973. values = group_expand(self, groups, domain).sudo()
  1974. if read_group_order == groupby + ' desc':
  1975. values.browse(reversed(values._ids))
  1976. value2key = lambda value: value and value.id
  1977. else:
  1978. # groups is a list of values
  1979. values = group_expand(self, values, domain)
  1980. if read_group_order == groupby + ' desc':
  1981. values.reverse()
  1982. value2key = lambda value: value
  1983. # Merge the current results (list of dicts) with all groups. Determine
  1984. # the global order of results groups, which is supposed to be in the
  1985. # same order as read_group_result (in the case of a many2one field).
  1986. read_group_result_as_dict = {}
  1987. for line in read_group_result:
  1988. read_group_result_as_dict[value2key(line[groupby])] = line
  1989. empty_item = {
  1990. name: self._read_group_empty_value(spec)
  1991. for name, spec in annoted_aggregates.items()
  1992. }
  1993. result = {}
  1994. # fill result with the values order
  1995. for value in values:
  1996. key = value2key(value)
  1997. if key in read_group_result_as_dict:
  1998. result[key] = read_group_result_as_dict.pop(key)
  1999. else:
  2000. result[key] = dict(empty_item, **{groupby: value})
  2001. for line in read_group_result_as_dict.values():
  2002. key = value2key(line[groupby])
  2003. result[key] = line
  2004. # add folding information if present
  2005. if field.relational and groups._fold_name in groups._fields:
  2006. fold = {group.id: group[groups._fold_name]
  2007. for group in groups.browse([key for key in result if key])}
  2008. for key, line in result.items():
  2009. line['__fold'] = fold.get(key, False)
  2010. return list(result.values())
  2011. @api.model
  2012. def _read_group_fill_temporal(self, data, groupby, annoted_aggregates,
  2013. fill_from=False, fill_to=False, min_groups=False):
  2014. """Helper method for filling date/datetime 'holes' in a result set.
  2015. We are in a use case where data are grouped by a date field (typically
  2016. months but it could be any other interval) and displayed in a chart.
  2017. Assume we group records by month, and we only have data for June,
  2018. September and December. By default, plotting the result gives something
  2019. like::
  2020. ___
  2021. ___ | |
  2022. | | ___ | |
  2023. |___||___||___|
  2024. Jun Sep Dec
  2025. The problem is that December data immediately follow September data,
  2026. which is misleading for the user. Adding explicit zeroes for missing
  2027. data gives something like::
  2028. ___
  2029. ___ | |
  2030. | | ___ | |
  2031. |___| ___ ___ |___| ___ ___ |___|
  2032. Jun Jul Aug Sep Oct Nov Dec
  2033. To customize this output, the context key "fill_temporal" can be used
  2034. under its dictionary format, which has 3 attributes : fill_from,
  2035. fill_to, min_groups (see params of this function)
  2036. Fill between bounds:
  2037. Using either `fill_from` and/or `fill_to` attributes, we can further
  2038. specify that at least a certain date range should be returned as
  2039. contiguous groups. Any group outside those bounds will not be removed,
  2040. but the filling will only occur between the specified bounds. When not
  2041. specified, existing groups will be used as bounds, if applicable.
  2042. By specifying such bounds, we can get empty groups before/after any
  2043. group with data.
  2044. If we want to fill groups only between August (fill_from)
  2045. and October (fill_to)::
  2046. ___
  2047. ___ | |
  2048. | | ___ | |
  2049. |___| ___ |___| ___ |___|
  2050. Jun Aug Sep Oct Dec
  2051. We still get June and December. To filter them out, we should match
  2052. `fill_from` and `fill_to` with the domain e.g. ``['&',
  2053. ('date_field', '>=', 'YYYY-08-01'), ('date_field', '<', 'YYYY-11-01')]``::
  2054. ___
  2055. ___ |___| ___
  2056. Aug Sep Oct
  2057. Minimal filling amount:
  2058. Using `min_groups`, we can specify that we want at least that amount of
  2059. contiguous groups. This amount is guaranteed to be provided from
  2060. `fill_from` if specified, or from the lowest existing group otherwise.
  2061. This amount is not restricted by `fill_to`. If there is an existing
  2062. group before `fill_from`, `fill_from` is still used as the starting
  2063. group for min_groups, because the filling does not apply on that
  2064. existing group. If neither `fill_from` nor `fill_to` is specified, and
  2065. there is no existing group, no group will be returned.
  2066. If we set min_groups = 4::
  2067. ___
  2068. ___ |___| ___ ___
  2069. Aug Sep Oct Nov
  2070. :param list data: the data containing groups
  2071. :param list groupby: list of fields being grouped on
  2072. :param list annoted_aggregates: dict of "<key_name>:<aggregate specification>"
  2073. :param str fill_from: (inclusive) string representation of a
  2074. date/datetime, start bound of the fill_temporal range
  2075. formats: date -> %Y-%m-%d, datetime -> %Y-%m-%d %H:%M:%S
  2076. :param str fill_to: (inclusive) string representation of a
  2077. date/datetime, end bound of the fill_temporal range
  2078. formats: date -> %Y-%m-%d, datetime -> %Y-%m-%d %H:%M:%S
  2079. :param int min_groups: minimal amount of required groups for the
  2080. fill_temporal range (should be >= 1)
  2081. :rtype: list
  2082. :return: list
  2083. """
  2084. # TODO: remove min_groups
  2085. first_group = groupby[0]
  2086. field_name = first_group.split(':')[0].split(".")[0]
  2087. field = self._fields[field_name]
  2088. if field.type not in ('date', 'datetime') and not (field.type == 'properties' and ':' in first_group):
  2089. return data
  2090. granularity = first_group.split(':')[1] if ':' in first_group else 'month'
  2091. days_offset = 0
  2092. if granularity == 'week':
  2093. # _read_group_process_groupby week groups are dependent on the
  2094. # locale, so filled groups should be too to avoid overlaps.
  2095. first_week_day = int(get_lang(self.env).week_start) - 1
  2096. days_offset = first_week_day and 7 - first_week_day
  2097. interval = READ_GROUP_TIME_GRANULARITY[granularity]
  2098. tz = False
  2099. if field.type == 'datetime' and self._context.get('tz') in pytz.all_timezones_set:
  2100. tz = pytz.timezone(self._context['tz'])
  2101. # TODO: refactor remaing lines here
  2102. # existing non null datetimes
  2103. existing = [d[first_group] for d in data if d[first_group]] or [None]
  2104. # assumption: existing data is sorted by field 'groupby_name'
  2105. existing_from, existing_to = existing[0], existing[-1]
  2106. if fill_from:
  2107. fill_from = odoo.fields.Datetime.to_datetime(fill_from) if isinstance(fill_from, datetime.datetime) else odoo.fields.Date.to_date(fill_from)
  2108. fill_from = date_utils.start_of(fill_from, granularity) - datetime.timedelta(days=days_offset)
  2109. if tz:
  2110. fill_from = tz.localize(fill_from)
  2111. elif existing_from:
  2112. fill_from = existing_from
  2113. if fill_to:
  2114. fill_to = odoo.fields.Datetime.to_datetime(fill_to) if isinstance(fill_to, datetime.datetime) else odoo.fields.Date.to_date(fill_to)
  2115. fill_to = date_utils.start_of(fill_to, granularity) - datetime.timedelta(days=days_offset)
  2116. if tz:
  2117. fill_to = tz.localize(fill_to)
  2118. elif existing_to:
  2119. fill_to = existing_to
  2120. if not fill_to and fill_from:
  2121. fill_to = fill_from
  2122. if not fill_from and fill_to:
  2123. fill_from = fill_to
  2124. if not fill_from and not fill_to:
  2125. return data
  2126. if min_groups > 0:
  2127. fill_to = max(fill_to, fill_from + (min_groups - 1) * interval)
  2128. if fill_to < fill_from:
  2129. return data
  2130. required_dates = date_utils.date_range(fill_from, fill_to, interval)
  2131. if existing[0] is None:
  2132. existing = list(required_dates)
  2133. else:
  2134. existing = sorted(set().union(existing, required_dates))
  2135. empty_item = {
  2136. name: self._read_group_empty_value(spec)
  2137. for name, spec in annoted_aggregates.items()
  2138. }
  2139. for group in groupby[1:]:
  2140. empty_item[group] = self._read_group_empty_value(group)
  2141. grouped_data = collections.defaultdict(list)
  2142. for d in data:
  2143. grouped_data[d[first_group]].append(d)
  2144. result = []
  2145. for dt in existing:
  2146. result.extend(grouped_data[dt] or [dict(empty_item, **{first_group: dt})])
  2147. if False in grouped_data:
  2148. result.extend(grouped_data[False])
  2149. return result
  2150. @api.model
  2151. def _read_group_format_result(self, rows_dict, lazy_groupby):
  2152. """
  2153. Helper method to format the data contained in the dictionary data by
  2154. adding the domain corresponding to its values, the groupbys in the
  2155. context and by properly formatting the date/datetime values.
  2156. :param data: a single group
  2157. :param annotated_groupbys: expanded grouping metainformation
  2158. :param groupby: original grouping metainformation
  2159. """
  2160. for group in lazy_groupby:
  2161. field_name = group.split(':')[0].split('.')[0]
  2162. field = self._fields[field_name]
  2163. if field.type in ('date', 'datetime'):
  2164. granularity = group.split(':')[1] if ':' in group else 'month'
  2165. if granularity in READ_GROUP_TIME_GRANULARITY:
  2166. locale = get_lang(self.env).code
  2167. fmt = DEFAULT_SERVER_DATETIME_FORMAT if field.type == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
  2168. interval = READ_GROUP_TIME_GRANULARITY[granularity]
  2169. elif field.type == "properties":
  2170. self._read_group_format_result_properties(rows_dict, group)
  2171. continue
  2172. for row in rows_dict:
  2173. value = row[group]
  2174. if isinstance(value, BaseModel):
  2175. row[group] = (value.id, value.sudo().display_name) if value else False
  2176. value = value.id
  2177. if not value and field.type == 'many2many':
  2178. other_values = [other_row[group][0] if isinstance(other_row[group], tuple)
  2179. else other_row[group].id if isinstance(other_row[group], BaseModel)
  2180. else other_row[group] for other_row in rows_dict if other_row[group]]
  2181. additional_domain = [(field_name, 'not in', other_values)]
  2182. else:
  2183. additional_domain = [(field_name, '=', value)]
  2184. if field.type in ('date', 'datetime'):
  2185. if value and isinstance(value, (datetime.date, datetime.datetime)):
  2186. range_start = value
  2187. range_end = value + interval
  2188. if field.type == 'datetime':
  2189. tzinfo = None
  2190. if self._context.get('tz') in pytz.all_timezones_set:
  2191. tzinfo = pytz.timezone(self._context['tz'])
  2192. range_start = tzinfo.localize(range_start).astimezone(pytz.utc)
  2193. # take into account possible hour change between start and end
  2194. range_end = tzinfo.localize(range_end).astimezone(pytz.utc)
  2195. label = babel.dates.format_datetime(
  2196. range_start, format=READ_GROUP_DISPLAY_FORMAT[granularity],
  2197. tzinfo=tzinfo, locale=locale
  2198. )
  2199. else:
  2200. label = babel.dates.format_date(
  2201. value, format=READ_GROUP_DISPLAY_FORMAT[granularity],
  2202. locale=locale
  2203. )
  2204. # special case weeks because babel is broken *and*
  2205. # ubuntu reverted a change so it's also inconsistent
  2206. if granularity == 'week':
  2207. year, week = date_utils.weeknumber(
  2208. babel.Locale.parse(locale),
  2209. value, # provide date or datetime without UTC conversion
  2210. )
  2211. label = f"W{week} {year:04}"
  2212. range_start = range_start.strftime(fmt)
  2213. range_end = range_end.strftime(fmt)
  2214. row[group] = label # TODO should put raw data
  2215. row.setdefault('__range', {})[group] = {'from': range_start, 'to': range_end}
  2216. additional_domain = [
  2217. '&',
  2218. (field_name, '>=', range_start),
  2219. (field_name, '<', range_end),
  2220. ]
  2221. elif value is not None and granularity in READ_GROUP_NUMBER_GRANULARITY:
  2222. additional_domain = [(f"{field_name}.{granularity}", '=', value)]
  2223. elif not value:
  2224. # Set the __range of the group containing records with an unset
  2225. # date/datetime field value to False.
  2226. row.setdefault('__range', {})[group] = False
  2227. row['__domain'] = expression.AND([row['__domain'], additional_domain])
  2228. def _read_group_format_result_properties(self, rows_dict, group):
  2229. """Modify the final read group properties result.
  2230. Replace the relational properties ids by a tuple with their display names,
  2231. replace the "raw" tags and selection values by a list containing their labels.
  2232. Adapt the domains for the Falsy group (we can't just keep (selection, =, False)
  2233. e.g. because some values in database might correspond to option that have
  2234. been remove on the parent).
  2235. """
  2236. if '.' not in group:
  2237. raise ValueError('You must choose the property you want to group by.')
  2238. fullname, __, func = group.partition(':')
  2239. definition = self.get_property_definition(fullname)
  2240. property_type = definition.get('type')
  2241. if property_type == 'selection':
  2242. options = definition.get('selection') or []
  2243. options = tuple(option[0] for option in options)
  2244. for row in rows_dict:
  2245. if not row[fullname]:
  2246. # can not do ('selection', '=', False) because we might have
  2247. # option in database that does not exist anymore
  2248. additional_domain = expression.OR([
  2249. [(fullname, '=', False)],
  2250. [(fullname, 'not in', options)],
  2251. ])
  2252. else:
  2253. additional_domain = [(fullname, '=', row[fullname])]
  2254. row['__domain'] = expression.AND([row['__domain'], additional_domain])
  2255. elif property_type == 'many2one':
  2256. comodel = definition.get('comodel')
  2257. prefetch_ids = tuple(row[fullname] for row in rows_dict if row[fullname])
  2258. all_groups = tuple(row[fullname] for row in rows_dict if row[fullname])
  2259. for row in rows_dict:
  2260. if not row[fullname]:
  2261. # can not only do ('many2one', '=', False) because we might have
  2262. # record in database that does not exist anymore
  2263. additional_domain = expression.OR([
  2264. [(fullname, '=', False)],
  2265. [(fullname, 'not in', all_groups)],
  2266. ])
  2267. else:
  2268. additional_domain = [(fullname, '=', row[fullname])]
  2269. record = self.env[comodel].browse(row[fullname]).with_prefetch(prefetch_ids)
  2270. row[fullname] = (row[fullname], record.display_name)
  2271. row['__domain'] = expression.AND([row['__domain'], additional_domain])
  2272. elif property_type == 'many2many':
  2273. comodel = definition.get('comodel')
  2274. prefetch_ids = tuple(row[fullname] for row in rows_dict if row[fullname])
  2275. all_groups = tuple(row[fullname] for row in rows_dict if row[fullname])
  2276. for row in rows_dict:
  2277. if not row[fullname]:
  2278. additional_domain = expression.OR([
  2279. [(fullname, '=', False)],
  2280. expression.AND([[(fullname, 'not in', group)] for group in all_groups]),
  2281. ]) if all_groups else []
  2282. else:
  2283. additional_domain = [(fullname, 'in', row[fullname])]
  2284. record = self.env[comodel].browse(row[fullname]).with_prefetch(prefetch_ids)
  2285. row[fullname] = (row[fullname], record.display_name)
  2286. row['__domain'] = expression.AND([row['__domain'], additional_domain])
  2287. elif property_type == 'tags':
  2288. tags = definition.get('tags') or []
  2289. tags = {tag[0]: tag for tag in tags}
  2290. for row in rows_dict:
  2291. if not row[fullname]:
  2292. additional_domain = expression.OR([
  2293. [(fullname, '=', False)],
  2294. expression.AND([[(fullname, 'not in', tag)] for tag in tags]),
  2295. ]) if tags else []
  2296. else:
  2297. additional_domain = [(fullname, 'in', row[fullname])]
  2298. # replace tag raw value with list of raw value, label and color
  2299. row[fullname] = tags.get(row[fullname])
  2300. row['__domain'] = expression.AND([row['__domain'], additional_domain])
  2301. elif property_type in ('date', 'datetime'):
  2302. for row in rows_dict:
  2303. if not row[group]:
  2304. row[group] = False
  2305. row['__domain'] = expression.AND([row['__domain'], [(fullname, '=', False)]])
  2306. row['__range'] = {}
  2307. continue
  2308. # Date / Datetime are not JSONifiable, so they are stored as raw text
  2309. db_format = '%Y-%m-%d' if property_type == 'date' else '%Y-%m-%d %H:%M:%S'
  2310. if func == 'week':
  2311. # the value is the first day of the week (based on local)
  2312. start = row[group].strftime(db_format)
  2313. end = (row[group] + datetime.timedelta(days=7)).strftime(db_format)
  2314. else:
  2315. start = (date_utils.start_of(row[group], func)).strftime(db_format)
  2316. end = (date_utils.end_of(row[group], func) + datetime.timedelta(minutes=1)).strftime(db_format)
  2317. row['__domain'] = expression.AND([
  2318. row['__domain'],
  2319. [(fullname, '>=', start), (fullname, '<', end)],
  2320. ])
  2321. row['__range'] = {group: {'from': start, 'to': end}}
  2322. row[group] = babel.dates.format_date(
  2323. row[group],
  2324. format=READ_GROUP_DISPLAY_FORMAT[func],
  2325. locale=get_lang(self.env).code
  2326. )
  2327. else:
  2328. for row in rows_dict:
  2329. row['__domain'] = expression.AND([row['__domain'], [(fullname, '=', row[fullname])]])
  2330. @api.model
  2331. def _read_group_get_annotated_groupby(self, groupby, lazy):
  2332. groupby = [groupby] if isinstance(groupby, str) else groupby
  2333. lazy_groupby = groupby[:1] if lazy else groupby
  2334. annotated_groupby = {} # Key as the name in the result, value as the explicit groupby specification
  2335. for group_spec in lazy_groupby:
  2336. field_name, property_name, granularity = parse_read_group_spec(group_spec)
  2337. if field_name not in self._fields:
  2338. raise ValueError(f"Invalid field {field_name!r} on model {self._name!r}")
  2339. field = self._fields[field_name]
  2340. if property_name and field.type != 'properties':
  2341. raise ValueError(f"Property name {property_name!r} has to be used on a property field.")
  2342. if field.type in ('date', 'datetime'):
  2343. annotated_groupby[group_spec] = f"{field_name}:{granularity or 'month'}"
  2344. else:
  2345. annotated_groupby[group_spec] = group_spec
  2346. return annotated_groupby
  2347. @api.model
  2348. @api.readonly
  2349. def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
  2350. """Get the list of records in list view grouped by the given ``groupby`` fields.
  2351. :param list domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
  2352. list to match all records.
  2353. :param list fields: list of fields present in the list view specified on the object.
  2354. Each element is either 'field' (field name, using the default aggregation),
  2355. or 'field:agg' (aggregate field with aggregation function 'agg'),
  2356. or 'name:agg(field)' (aggregate field with 'agg' and return it as 'name').
  2357. The possible aggregation functions are the ones provided by
  2358. `PostgreSQL <https://www.postgresql.org/docs/current/static/functions-aggregate.html>`_
  2359. and 'count_distinct', with the expected meaning.
  2360. :param list groupby: list of groupby descriptions by which the records will be grouped.
  2361. A groupby description is either a field (then it will be grouped by that field).
  2362. For the dates an datetime fields, you can specify a granularity using the syntax 'field:granularity'.
  2363. The supported granularities are 'hour', 'day', 'week', 'month', 'quarter' or 'year';
  2364. Read_group also supports integer date parts:
  2365. 'year_number', 'quarter_number', 'month_number' 'iso_week_number', 'day_of_year', 'day_of_month',
  2366. 'day_of_week', 'hour_number', 'minute_number' and 'second_number'.
  2367. :param int offset: optional number of groups to skip
  2368. :param int limit: optional max number of groups to return
  2369. :param str orderby: optional ``order by`` specification, for
  2370. overriding the natural sort ordering of the
  2371. groups, see also :meth:`~.search`
  2372. (supported only for many2one fields currently)
  2373. :param bool lazy: if true, the results are only grouped by the first groupby and the
  2374. remaining groupbys are put in the __context key. If false, all the groupbys are
  2375. done in one call.
  2376. :return: list of dictionaries(one dictionary for each record) containing:
  2377. * the values of fields grouped by the fields in ``groupby`` argument
  2378. * __domain: list of tuples specifying the search criteria
  2379. * __context: dictionary with argument like ``groupby``
  2380. * __range: (date/datetime only) dictionary with field_name:granularity as keys
  2381. mapping to a dictionary with keys: "from" (inclusive) and "to" (exclusive)
  2382. mapping to a string representation of the temporal bounds of the group
  2383. :rtype: [{'field_name_1': value, ...}, ...]
  2384. :raise AccessError: if user is not allowed to access requested information
  2385. """
  2386. groupby = [groupby] if isinstance(groupby, str) else groupby
  2387. lazy_groupby = groupby[:1] if lazy else groupby
  2388. # Compatibility layer with _read_group, it should be remove in the second part of the refactoring
  2389. # - Modify `groupby` default value 'month' into specific groupby specification
  2390. # - Modify `fields` into aggregates specification of _read_group
  2391. # - Modify the order to be compatible with the _read_group specification
  2392. annotated_groupby = self._read_group_get_annotated_groupby(groupby, lazy=lazy)
  2393. annotated_aggregates = { # Key as the name in the result, value as the explicit aggregate specification
  2394. f"{lazy_groupby[0].split(':')[0]}_count" if lazy and len(lazy_groupby) == 1 else '__count': '__count',
  2395. }
  2396. for field_spec in fields:
  2397. if field_spec == '__count':
  2398. continue
  2399. match = regex_field_agg.match(field_spec)
  2400. if not match:
  2401. raise ValueError(f"Invalid field specification {field_spec!r}.")
  2402. name, func, fname = match.groups()
  2403. if fname: # Manage this kind of specification : "field_min:min(field)"
  2404. annotated_aggregates[name] = f"{fname}:{func}"
  2405. continue
  2406. if func: # Manage this kind of specification : "field:min"
  2407. annotated_aggregates[name] = f"{name}:{func}"
  2408. continue
  2409. if name not in self._fields:
  2410. raise ValueError(f"Invalid field {name!r} on model {self._name!r}")
  2411. field = self._fields[name]
  2412. if field.base_field.store and field.base_field.column_type and field.aggregator and field_spec not in annotated_groupby:
  2413. annotated_aggregates[name] = f"{name}:{field.aggregator}"
  2414. if orderby:
  2415. new_terms = []
  2416. for order_term in orderby.split(','):
  2417. order_term = order_term.strip()
  2418. for key_name, annotated in itertools.chain(reversed(annotated_groupby.items()), annotated_aggregates.items()):
  2419. key_name = key_name.split(':')[0]
  2420. if order_term.startswith(f'{key_name} ') or key_name == order_term:
  2421. order_term = order_term.replace(key_name, annotated)
  2422. break
  2423. new_terms.append(order_term)
  2424. orderby = ','.join(new_terms)
  2425. else:
  2426. orderby = ','.join(annotated_groupby.values())
  2427. rows = self._read_group(domain, annotated_groupby.values(), annotated_aggregates.values(), offset=offset, limit=limit, order=orderby)
  2428. rows_dict = [
  2429. dict(zip(itertools.chain(annotated_groupby, annotated_aggregates), row))
  2430. for row in rows
  2431. ]
  2432. fill_temporal = self.env.context.get('fill_temporal')
  2433. if lazy_groupby and (rows_dict and fill_temporal) or isinstance(fill_temporal, dict):
  2434. # fill_temporal = {} is equivalent to fill_temporal = True
  2435. # if fill_temporal is a dictionary and there is no data, there is a chance that we
  2436. # want to display empty columns anyway, so we should apply the fill_temporal logic
  2437. if not isinstance(fill_temporal, dict):
  2438. fill_temporal = {}
  2439. # TODO Shouldn't be possible with a limit
  2440. rows_dict = self._read_group_fill_temporal(
  2441. rows_dict, lazy_groupby,
  2442. annotated_aggregates, **fill_temporal,
  2443. )
  2444. if lazy_groupby and lazy:
  2445. # Right now, read_group only fill results in lazy mode (by default).
  2446. # If you need to have the empty groups in 'eager' mode, then the
  2447. # method _read_group_fill_results need to be completely reimplemented
  2448. # in a sane way
  2449. # TODO Shouldn't be possible with a limit or the limit should be in account
  2450. rows_dict = self._read_group_fill_results(
  2451. domain, lazy_groupby[0],
  2452. annotated_aggregates, rows_dict, read_group_order=orderby,
  2453. )
  2454. for row in rows_dict:
  2455. row['__domain'] = domain
  2456. if len(lazy_groupby) < len(groupby):
  2457. row['__context'] = {'group_by': groupby[len(lazy_groupby):]}
  2458. self._read_group_format_result(rows_dict, lazy_groupby)
  2459. return rows_dict
  2460. def _traverse_related_sql(self, alias: str, field: Field, query: Query):
  2461. """ Traverse the related `field` and add needed join to the `query`. """
  2462. assert field.related and not field.store
  2463. if not (self.env.su or field.compute_sudo or field.inherited):
  2464. raise ValueError(f'Cannot convert {field} to SQL because it is not a sudoed related or inherited field')
  2465. model = self.sudo(self.env.su or field.compute_sudo)
  2466. *path_fnames, last_fname = field.related.split('.')
  2467. for path_fname in path_fnames:
  2468. path_field = model._fields[path_fname]
  2469. if path_field.type != 'many2one':
  2470. raise ValueError(f'Cannot convert {field} (related={field.related}) to SQL because {path_fname} is not a Many2one')
  2471. comodel = model.env[path_field.comodel_name]
  2472. coalias = query.make_alias(alias, path_fname)
  2473. query.add_join('LEFT JOIN', coalias, comodel._table, SQL(
  2474. "%s = %s",
  2475. model._field_to_sql(alias, path_fname, query),
  2476. SQL.identifier(coalias, 'id'),
  2477. ))
  2478. model, alias = comodel, coalias
  2479. return model, model._fields[last_fname], alias
  2480. def _field_to_sql(self, alias: str, fname: str, query: (Query | None) = None, flush: bool = True) -> SQL:
  2481. """ Return an :class:`SQL` object that represents the value of the given
  2482. field from the given table alias, in the context of the given query.
  2483. The method also checks that the field is accessible for reading.
  2484. The query object is necessary for inherited fields, many2one fields and
  2485. properties fields, where joins are added to the query.
  2486. When parameter ``flush`` is true, the method adds some metadata in the
  2487. result to make method :meth:`~odoo.api.Environment.execute_query` flush
  2488. the field before executing the query.
  2489. """
  2490. property_name = None
  2491. if '.' in fname:
  2492. fname, property_name = fname.split('.', 1)
  2493. field = self._fields.get(fname)
  2494. if not field:
  2495. raise ValueError(f"Invalid field {fname!r} on model {self._name!r}")
  2496. if field.related and not field.store:
  2497. model, field, alias = self._traverse_related_sql(alias, field, query)
  2498. return model._field_to_sql(alias, field.name, query)
  2499. if not field.store or not field.column_type:
  2500. raise ValueError(f"Cannot convert {field} to SQL because it is not stored")
  2501. if field.type == 'properties' and property_name:
  2502. return SQL("%s -> %s", self._field_to_sql(alias, fname, query, flush), property_name)
  2503. if property_name:
  2504. fname = f"{fname}.{property_name}"
  2505. raise ValueError(f"Invalid field {fname!r} on model {self._name!r}")
  2506. self.check_field_access_rights('read', [field.name])
  2507. field_to_flush = field if flush and fname != 'id' else None
  2508. sql_field = SQL.identifier(alias, fname, to_flush=field_to_flush)
  2509. if field.translate:
  2510. langs = field.get_translation_fallback_langs(self.env)
  2511. sql_field_langs = [SQL("%s->>%s", sql_field, lang) for lang in langs]
  2512. if len(sql_field_langs) == 1:
  2513. return sql_field_langs[0]
  2514. return SQL("COALESCE(%s)", SQL(", ").join(sql_field_langs))
  2515. if field.company_dependent:
  2516. sql_field = SQL(
  2517. "%(column)s->%(company_id)s",
  2518. column=sql_field,
  2519. company_id=str(self.env.company.id),
  2520. )
  2521. fallback = field.get_company_dependent_fallback(self)
  2522. fallback = field.convert_to_column(field.convert_to_write(fallback, self), self)
  2523. if fallback not in (None, 0): # 0, 0.0, False, None
  2524. sql_field = SQL(
  2525. 'COALESCE(%(field)s, to_jsonb(%(fallback)s::%(column_type)s))',
  2526. field=sql_field,
  2527. fallback=fallback,
  2528. column_type=SQL(field._column_type[1]),
  2529. )
  2530. # here the specified value for a company might be NULL e.g. '{"1": null}'::jsonb
  2531. # the result of current sql_field might be 'null'::jsonb
  2532. # ('null'::jsonb)::text == 'null'
  2533. # ('null'::jsonb->>0)::text IS NULL
  2534. return SQL('(%s->>0)::%s', sql_field, SQL(field._column_type[1]))
  2535. return sql_field
  2536. def _read_group_groupby_properties(self, fname: str, property_name: str, query: Query) -> SQL:
  2537. definition = self.get_property_definition(f"{fname}.{property_name}")
  2538. property_type = definition.get('type')
  2539. sql_property = self._field_to_sql(self._table, f'{fname}.{property_name}', query)
  2540. # JOIN on the JSON array
  2541. if property_type in ('tags', 'many2many'):
  2542. property_alias = query.make_alias(self._table, f'{fname}_{property_name}')
  2543. sql_property = SQL(
  2544. """ CASE
  2545. WHEN jsonb_typeof(%(property)s) = 'array'
  2546. THEN %(property)s
  2547. ELSE '[]'::jsonb
  2548. END """,
  2549. property=sql_property,
  2550. )
  2551. if property_type == 'tags':
  2552. # ignore invalid tags
  2553. tags = [tag[0] for tag in definition.get('tags') or []]
  2554. # `->>0 : convert "JSON string" into string
  2555. condition = SQL(
  2556. "%s->>0 = ANY(%s::text[])",
  2557. SQL.identifier(property_alias), tags,
  2558. )
  2559. else:
  2560. comodel = self.env.get(definition.get('comodel'))
  2561. if comodel is None or comodel._transient or comodel._abstract:
  2562. raise UserError(_(
  2563. "You cannot use “%(property_name)s” because the linked “%(model_name)s” model doesn't exist or is invalid",
  2564. property_name=definition.get('string', property_name), model_name=definition.get('comodel'),
  2565. ))
  2566. # check the existences of the many2many
  2567. condition = SQL(
  2568. "%s::int IN (SELECT id FROM %s)",
  2569. SQL.identifier(property_alias), SQL.identifier(comodel._table),
  2570. )
  2571. query.add_join(
  2572. "LEFT JOIN",
  2573. property_alias,
  2574. SQL("jsonb_array_elements(%s)", sql_property),
  2575. condition,
  2576. )
  2577. return SQL.identifier(property_alias)
  2578. elif property_type == 'selection':
  2579. options = [option[0] for option in definition.get('selection') or ()]
  2580. # check the existence of the option
  2581. property_alias = query.make_alias(self._table, f'{fname}_{property_name}')
  2582. query.add_join(
  2583. "LEFT JOIN",
  2584. property_alias,
  2585. SQL("(SELECT unnest(%s::text[]) %s)", options, SQL.identifier(property_alias)),
  2586. SQL("%s->>0 = %s", sql_property, SQL.identifier(property_alias)),
  2587. )
  2588. return SQL.identifier(property_alias)
  2589. elif property_type == 'many2one':
  2590. comodel = self.env.get(definition.get('comodel'))
  2591. if comodel is None or comodel._transient or comodel._abstract:
  2592. raise UserError(_(
  2593. "You cannot use “%(property_name)s” because the linked “%(model_name)s” model doesn't exist or is invalid",
  2594. property_name=definition.get('string', property_name), model_name=definition.get('comodel'),
  2595. ))
  2596. return SQL(
  2597. """ CASE
  2598. WHEN jsonb_typeof(%(property)s) = 'number'
  2599. AND (%(property)s)::int IN (SELECT id FROM %(table)s)
  2600. THEN %(property)s
  2601. ELSE NULL
  2602. END """,
  2603. property=sql_property,
  2604. table=SQL.identifier(comodel._table),
  2605. )
  2606. elif property_type == 'date':
  2607. return SQL(
  2608. """ CASE
  2609. WHEN jsonb_typeof(%(property)s) = 'string'
  2610. THEN (%(property)s->>0)::DATE
  2611. ELSE NULL
  2612. END """,
  2613. property=sql_property,
  2614. )
  2615. elif property_type == 'datetime':
  2616. return SQL(
  2617. """ CASE
  2618. WHEN jsonb_typeof(%(property)s) = 'string'
  2619. THEN to_timestamp(%(property)s->>0, 'YYYY-MM-DD HH24:MI:SS')
  2620. ELSE NULL
  2621. END """,
  2622. property=sql_property,
  2623. )
  2624. # if the key is not present in the dict, fallback to false instead of none
  2625. return SQL("COALESCE(%s, 'false')", sql_property)
  2626. def _condition_to_sql(self, alias: str, fname: str, operator: str, value, query: Query) -> SQL:
  2627. """ Return an :class:`SQL` object that represents the domain condition
  2628. given by the triple ``(fname, operator, value)`` with the given table
  2629. alias, and in the context of the given query.
  2630. The method is also responsible for checking that the field is accessible
  2631. for reading, and should include metadata in the result object to make
  2632. sure that the necessary fields are flushed before executing the final
  2633. SQL query.
  2634. """
  2635. # sanity checks - should never fail
  2636. assert operator in expression.TERM_OPERATORS, \
  2637. f"Invalid operator {operator!r} in domain term {(fname, operator, value)!r}"
  2638. assert fname in self._fields, \
  2639. f"Invalid field {fname!r} in domain term {(fname, operator, value)!r}"
  2640. assert not isinstance(value, BaseModel), \
  2641. f"Invalid value {value!r} in domain term {(fname, operator, value)!r}"
  2642. if operator == '=?':
  2643. if value is False or value is None:
  2644. # '=?' is a short-circuit that makes the term TRUE if value is None or False
  2645. return SQL("TRUE")
  2646. else:
  2647. # '=?' behaves like '=' in other cases
  2648. return self._condition_to_sql(alias, fname, '=', value, query)
  2649. sql_field = self._field_to_sql(alias, fname, query)
  2650. field = self._fields[fname]
  2651. is_number_field = field.type in ('integer', 'float', 'monetary') and field.name != 'id'
  2652. is_char_field = field.type in ('char', 'text', 'html')
  2653. sql_operator = expression.SQL_OPERATORS[operator]
  2654. if operator in ('in', 'not in'):
  2655. # Two cases: value is a boolean or a list. The boolean case is an
  2656. # abuse and handled for backward compatibility.
  2657. if isinstance(value, bool):
  2658. _logger.warning("The domain term '%s' should use the '=' or '!=' operator.", (fname, operator, value))
  2659. if (operator == 'in' and value) or (operator == 'not in' and not value):
  2660. return SQL("(%s IS NOT NULL)", sql_field)
  2661. else:
  2662. return SQL("(%s IS NULL)", sql_field)
  2663. elif isinstance(value, SQL):
  2664. return SQL("(%s %s %s)", sql_field, sql_operator, value)
  2665. elif isinstance(value, Query):
  2666. return SQL("(%s %s %s)", sql_field, sql_operator, value.subselect())
  2667. elif isinstance(value, (list, tuple)):
  2668. params = [it for it in value if it is not False and it is not None]
  2669. check_null = len(params) < len(value)
  2670. if field.type == 'boolean':
  2671. # just replace instead of casting, only truthy values remain
  2672. params = [True] if any(params) else []
  2673. if check_null:
  2674. params.append(False)
  2675. elif is_number_field:
  2676. if check_null and 0 not in params:
  2677. params.append(0)
  2678. check_null = check_null or (0 in params)
  2679. elif is_char_field:
  2680. if check_null and '' not in params:
  2681. params.append('')
  2682. check_null = check_null or ('' in params)
  2683. if params:
  2684. if fname != 'id':
  2685. params = [field.convert_to_column(p, self, validate=False) for p in params]
  2686. sql = SQL("(%s %s %s)", sql_field, sql_operator, tuple(params))
  2687. else:
  2688. # The case for (fname, 'in', []) or (fname, 'not in', []).
  2689. sql = SQL("FALSE") if operator == 'in' else SQL("TRUE")
  2690. if (operator == 'in' and check_null) or (operator == 'not in' and not check_null):
  2691. sql = SQL("(%s OR %s IS NULL)", sql, sql_field)
  2692. elif operator == 'not in' and check_null:
  2693. sql = SQL("(%s AND %s IS NOT NULL)", sql, sql_field) # needed only for TRUE
  2694. return sql
  2695. else: # Must not happen
  2696. raise ValueError(f"Invalid domain term {(fname, operator, value)!r}")
  2697. if field.type == 'boolean' and operator in ('=', '!=') and isinstance(value, bool):
  2698. value = (not value) if operator in expression.NEGATIVE_TERM_OPERATORS else value
  2699. if value:
  2700. return SQL("(%s = TRUE)", sql_field)
  2701. else:
  2702. return SQL("(%s IS NULL OR %s = FALSE)", sql_field, sql_field)
  2703. if (field.relational or field.name == 'id') and operator in ('=', '!=') and isinstance(value, NewId):
  2704. _logger.warning("_condition_to_sql: ignored (%r, %r, NewId), did you mean (%r, 'in', recs.ids)?", fname, operator, fname)
  2705. return SQL("TRUE") if operator in expression.NEGATIVE_TERM_OPERATORS else SQL("FALSE")
  2706. # comparison with null
  2707. # except for some basic types, where we need to check the empty value
  2708. if (field.relational or field.name == 'id') and operator in ('=', '!=') and not value:
  2709. # if we compare a relation to 0, then compare only to False
  2710. value = False
  2711. if operator in ('=', '!=') and (value is False or value is None):
  2712. if is_number_field:
  2713. value = 0 # generates (fname = 0 OR fname IS NULL)
  2714. elif is_char_field:
  2715. value = '' # generates (fname = '' OR fname IS NULL)
  2716. elif operator == '=':
  2717. return SQL("%s IS NULL", sql_field)
  2718. elif operator == '!=':
  2719. return SQL("%s IS NOT NULL", sql_field)
  2720. # general case
  2721. need_wildcard = operator in expression.WILDCARD_OPERATORS
  2722. if isinstance(value, SQL):
  2723. sql_value = value
  2724. elif need_wildcard:
  2725. sql_value = SQL("%s", f"%{value}%")
  2726. else:
  2727. sql_value = SQL("%s", field.convert_to_column(value, self, validate=False))
  2728. sql_left = sql_field
  2729. if operator.endswith('like') and field.type not in ('char', 'text', 'html'):
  2730. sql_left = SQL("(%s)::text", sql_field)
  2731. if operator.endswith('ilike'):
  2732. sql_left = self.env.registry.unaccent(sql_left)
  2733. sql_value = self.env.registry.unaccent(sql_value)
  2734. if need_wildcard and not value:
  2735. return SQL("FALSE") if operator in expression.NEGATIVE_TERM_OPERATORS else SQL("TRUE")
  2736. sql = SQL("(%s %s %s)", sql_left, sql_operator, sql_value)
  2737. if (
  2738. bool(value) == (operator in expression.NEGATIVE_TERM_OPERATORS)
  2739. # exception: don't add for inequalities
  2740. and operator[:1] not in ('>', '<')
  2741. ):
  2742. sql = SQL("(%s OR %s IS NULL)", sql, sql_field)
  2743. if not need_wildcard and is_number_field:
  2744. cmp_value = field.convert_to_record(field.convert_to_cache(value, self), self)
  2745. if (
  2746. operator == '>=' and cmp_value <= 0
  2747. or operator == '<=' and cmp_value >= 0
  2748. or operator == '<' and cmp_value > 0
  2749. or operator == '>' and cmp_value < 0
  2750. ):
  2751. sql = SQL("(%s OR %s IS NULL)", sql, sql_field)
  2752. return sql
  2753. @api.model
  2754. def get_property_definition(self, full_name):
  2755. """Return the definition of the given property.
  2756. :param full_name: Name of the field / property
  2757. (e.g. "property.integer")
  2758. """
  2759. self.browse().check_access("read")
  2760. field_name, property_name = full_name.split(".")
  2761. check_property_field_value_name(property_name)
  2762. if field_name not in self._fields:
  2763. raise ValueError(f"Invalid field {field_name!r} on model {self._name!r}")
  2764. field = self._fields[field_name]
  2765. target_model = self.env[self._fields[field.definition_record].comodel_name]
  2766. self.env.cr.execute(SQL(
  2767. """ SELECT definition
  2768. FROM %(table)s, jsonb_array_elements(%(field)s) definition
  2769. WHERE %(field)s IS NOT NULL AND definition->>'name' = %(name)s
  2770. LIMIT 1 """,
  2771. table=SQL.identifier(target_model._table),
  2772. field=SQL.identifier(field.definition_record_field),
  2773. name=property_name,
  2774. ))
  2775. result = self.env.cr.dictfetchone()
  2776. return result["definition"] if result else {}
  2777. def _parent_store_compute(self):
  2778. """ Compute parent_path field from scratch. """
  2779. if not self._parent_store:
  2780. return
  2781. # Each record is associated to a string 'parent_path', that represents
  2782. # the path from the record's root node to the record. The path is made
  2783. # of the node ids suffixed with a slash (see example below). The nodes
  2784. # in the subtree of record are the ones where 'parent_path' starts with
  2785. # the 'parent_path' of record.
  2786. #
  2787. # a node | id | parent_path
  2788. # / \ a | 42 | 42/
  2789. # ... b b | 63 | 42/63/
  2790. # / \ c | 84 | 42/63/84/
  2791. # c d d | 85 | 42/63/85/
  2792. #
  2793. # Note: the final '/' is necessary to match subtrees correctly: '42/63'
  2794. # is a prefix of '42/630', but '42/63/' is not a prefix of '42/630/'.
  2795. _logger.info('Computing parent_path for table %s...', self._table)
  2796. query = SQL(
  2797. """ WITH RECURSIVE __parent_store_compute(id, parent_path) AS (
  2798. SELECT row.id, concat(row.id, '/')
  2799. FROM %(table)s row
  2800. WHERE row.%(parent)s IS NULL
  2801. UNION
  2802. SELECT row.id, concat(comp.parent_path, row.id, '/')
  2803. FROM %(table)s row, __parent_store_compute comp
  2804. WHERE row.%(parent)s = comp.id
  2805. )
  2806. UPDATE %(table)s row SET parent_path = comp.parent_path
  2807. FROM __parent_store_compute comp
  2808. WHERE row.id = comp.id """,
  2809. table=SQL.identifier(self._table),
  2810. parent=SQL.identifier(self._parent_name),
  2811. )
  2812. self.env.cr.execute(query)
  2813. self.invalidate_model(['parent_path'])
  2814. return True
  2815. def _check_removed_columns(self, log=False):
  2816. if self._abstract:
  2817. return
  2818. # iterate on the database columns to drop the NOT NULL constraints of
  2819. # fields which were required but have been removed (or will be added by
  2820. # another module)
  2821. cr = self._cr
  2822. cols = [name for name, field in self._fields.items()
  2823. if field.store and field.column_type]
  2824. cr.execute(SQL(
  2825. """ SELECT a.attname, a.attnotnull
  2826. FROM pg_class c, pg_attribute a
  2827. WHERE c.relname=%s
  2828. AND c.oid=a.attrelid
  2829. AND a.attisdropped=%s
  2830. AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')
  2831. AND a.attname NOT IN %s """,
  2832. self._table, False, tuple(cols),
  2833. ))
  2834. for row in cr.dictfetchall():
  2835. if log:
  2836. _logger.debug("column %s is in the table %s but not in the corresponding object %s",
  2837. row['attname'], self._table, self._name)
  2838. if row['attnotnull']:
  2839. sql.drop_not_null(cr, self._table, row['attname'])
  2840. def _init_column(self, column_name):
  2841. """ Initialize the value of the given column for existing rows. """
  2842. # get the default value; ideally, we should use default_get(), but it
  2843. # fails due to ir.default not being ready
  2844. field = self._fields[column_name]
  2845. if field.default:
  2846. value = field.default(self)
  2847. value = field.convert_to_write(value, self)
  2848. value = field.convert_to_column_insert(value, self)
  2849. else:
  2850. value = None
  2851. # Write value if non-NULL, except for booleans for which False means
  2852. # the same as NULL - this saves us an expensive query on large tables.
  2853. necessary = (value is not None) if field.type != 'boolean' else value
  2854. if necessary:
  2855. _logger.debug("Table '%s': setting default value of new column %s to %r",
  2856. self._table, column_name, value)
  2857. self._cr.execute(SQL(
  2858. "UPDATE %(table)s SET %(field)s = %(value)s WHERE %(field)s IS NULL",
  2859. table=SQL.identifier(self._table),
  2860. field=SQL.identifier(column_name),
  2861. value=value,
  2862. ))
  2863. @ormcache()
  2864. def _table_has_rows(self):
  2865. """ Return whether the model's table has rows. This method should only
  2866. be used when updating the database schema (:meth:`~._auto_init`).
  2867. """
  2868. self.env.cr.execute(SQL('SELECT 1 FROM %s LIMIT 1', SQL.identifier(self._table)))
  2869. return self.env.cr.rowcount
  2870. def _auto_init(self):
  2871. """ Initialize the database schema of ``self``:
  2872. - create the corresponding table,
  2873. - create/update the necessary columns/tables for fields,
  2874. - initialize new columns on existing rows,
  2875. - add the SQL constraints given on the model,
  2876. - add the indexes on indexed fields,
  2877. Also prepare post-init stuff to:
  2878. - add foreign key constraints,
  2879. - reflect models, fields, relations and constraints,
  2880. - mark fields to recompute on existing records.
  2881. Note: you should not override this method. Instead, you can modify
  2882. the model's database schema by overriding method :meth:`~.init`,
  2883. which is called right after this one.
  2884. """
  2885. raise_on_invalid_object_name(self._name)
  2886. # This prevents anything called by this method (in particular default
  2887. # values) from prefetching a field for which the corresponding column
  2888. # has not been added in database yet!
  2889. self = self.with_context(prefetch_fields=False)
  2890. cr = self._cr
  2891. update_custom_fields = self._context.get('update_custom_fields', False)
  2892. must_create_table = not sql.table_exists(cr, self._table)
  2893. parent_path_compute = False
  2894. if self._auto:
  2895. if must_create_table:
  2896. def make_type(field):
  2897. return field.column_type[1] + (" NOT NULL" if field.required else "")
  2898. sql.create_model_table(cr, self._table, self._description, [
  2899. (field.name, make_type(field), field.string)
  2900. for field in sorted(self._fields.values(), key=lambda f: f.column_order)
  2901. if field.name != 'id' and field.store and field.column_type
  2902. ])
  2903. if self._parent_store:
  2904. if not sql.column_exists(cr, self._table, 'parent_path'):
  2905. sql.create_column(self._cr, self._table, 'parent_path', 'VARCHAR')
  2906. parent_path_compute = True
  2907. self._check_parent_path()
  2908. if not must_create_table:
  2909. self._check_removed_columns(log=False)
  2910. # update the database schema for fields
  2911. columns = sql.table_columns(cr, self._table)
  2912. fields_to_compute = []
  2913. for field in sorted(self._fields.values(), key=lambda f: f.column_order):
  2914. if not field.store:
  2915. continue
  2916. if field.manual and not update_custom_fields:
  2917. continue # don't update custom fields
  2918. new = field.update_db(self, columns)
  2919. if new and field.compute:
  2920. fields_to_compute.append(field)
  2921. if fields_to_compute:
  2922. # mark existing records for computation now, so that computed
  2923. # required fields are flushed before the NOT NULL constraint is
  2924. # added to the database
  2925. cr.execute(SQL('SELECT id FROM %s', SQL.identifier(self._table)))
  2926. records = self.browse(row[0] for row in cr.fetchall())
  2927. if records:
  2928. for field in fields_to_compute:
  2929. _logger.info("Prepare computation of %s", field)
  2930. self.env.add_to_compute(field, records)
  2931. if self._auto:
  2932. self._add_sql_constraints()
  2933. if parent_path_compute:
  2934. self._parent_store_compute()
  2935. def init(self):
  2936. """ This method is called after :meth:`~._auto_init`, and may be
  2937. overridden to create or modify a model's database schema.
  2938. """
  2939. def _check_parent_path(self):
  2940. field = self._fields.get('parent_path')
  2941. if field is None:
  2942. _logger.error("add a field parent_path on model %r: `parent_path = fields.Char(index=True)`.", self._name)
  2943. elif not field.index:
  2944. _logger.error('parent_path field on model %r should be indexed! Add index=True to the field definition.', self._name)
  2945. def _add_sql_constraints(self):
  2946. """ Modify this model's database table constraints so they match the one
  2947. in _sql_constraints.
  2948. """
  2949. cr = self._cr
  2950. foreign_key_re = re.compile(r'\s*foreign\s+key\b.*', re.I)
  2951. for (key, definition, message) in self._sql_constraints:
  2952. conname = '%s_%s' % (self._table, key)
  2953. if len(conname) > 63:
  2954. hashed_conname = sql.make_identifier(conname)
  2955. current_definition = sql.constraint_definition(cr, self._table, hashed_conname)
  2956. if not current_definition:
  2957. _logger.info("Constraint name %r has more than 63 characters, internal PG identifier is %r", conname, hashed_conname)
  2958. conname = hashed_conname
  2959. else:
  2960. current_definition = sql.constraint_definition(cr, self._table, conname)
  2961. if current_definition == definition:
  2962. continue
  2963. if current_definition:
  2964. # constraint exists but its definition may have changed
  2965. sql.drop_constraint(cr, self._table, conname)
  2966. if not definition:
  2967. # virtual constraint (e.g. implemented by a custom index)
  2968. self.pool.post_init(sql.check_index_exist, cr, conname)
  2969. elif foreign_key_re.match(definition):
  2970. self.pool.post_init(sql.add_constraint, cr, self._table, conname, definition)
  2971. else:
  2972. self.pool.post_constraint(sql.add_constraint, cr, self._table, conname, definition)
  2973. #
  2974. # Update objects that use this one to update their _inherits fields
  2975. #
  2976. @api.model
  2977. def _add_inherited_fields(self):
  2978. """ Determine inherited fields. """
  2979. if self._abstract or not self._inherits:
  2980. return
  2981. # determine which fields can be inherited
  2982. to_inherit = {
  2983. name: (parent_fname, field)
  2984. for parent_model_name, parent_fname in self._inherits.items()
  2985. for name, field in self.env[parent_model_name]._fields.items()
  2986. }
  2987. # add inherited fields that are not redefined locally
  2988. for name, (parent_fname, field) in to_inherit.items():
  2989. if name not in self._fields:
  2990. # inherited fields are implemented as related fields, with the
  2991. # following specific properties:
  2992. # - reading inherited fields should not bypass access rights
  2993. # - copy inherited fields iff their original field is copied
  2994. Field = type(field)
  2995. self._add_field(name, Field(
  2996. inherited=True,
  2997. inherited_field=field,
  2998. related=f"{parent_fname}.{name}",
  2999. related_sudo=False,
  3000. copy=field.copy,
  3001. readonly=field.readonly,
  3002. export_string_translation=field.export_string_translation,
  3003. ))
  3004. @api.model
  3005. def _inherits_check(self):
  3006. for table, field_name in self._inherits.items():
  3007. field = self._fields.get(field_name)
  3008. if not field:
  3009. _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
  3010. from .fields import Many2one
  3011. field = Many2one(table, string="Automatically created field to link to parent %s" % table, required=True, ondelete="cascade")
  3012. self._add_field(field_name, field)
  3013. elif not (field.required and (field.ondelete or "").lower() in ("cascade", "restrict")):
  3014. _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
  3015. field.required = True
  3016. field.ondelete = "cascade"
  3017. field.delegate = True
  3018. # reflect fields with delegate=True in dictionary self._inherits
  3019. for field in self._fields.values():
  3020. if field.type == 'many2one' and not field.related and field.delegate:
  3021. if not field.required:
  3022. _logger.warning("Field %s with delegate=True must be required.", field)
  3023. field.required = True
  3024. if field.ondelete.lower() not in ('cascade', 'restrict'):
  3025. field.ondelete = 'cascade'
  3026. self.pool[self._name]._inherits = {**self._inherits, field.comodel_name: field.name}
  3027. self.pool[field.comodel_name]._inherits_children.add(self._name)
  3028. @api.model
  3029. def _prepare_setup(self):
  3030. """ Prepare the setup of the model. """
  3031. cls = self.env.registry[self._name]
  3032. cls._setup_done = False
  3033. # changing base classes is costly, do it only when necessary
  3034. if cls.__bases__ != cls.__base_classes:
  3035. cls.__bases__ = cls.__base_classes
  3036. # reset those attributes on the model's class for _setup_fields() below
  3037. for attr in ('_rec_name', '_active_name'):
  3038. discardattr(cls, attr)
  3039. @api.model
  3040. def _setup_base(self):
  3041. """ Determine the inherited and custom fields of the model. """
  3042. cls = self.env.registry[self._name]
  3043. if cls._setup_done:
  3044. return
  3045. # the classes that define this model, i.e., the ones that are not
  3046. # registry classes; the purpose of this attribute is to behave as a
  3047. # cache of [c for c in cls.mro() if not is_registry_class(c))], which
  3048. # is heavily used in function fields.resolve_mro()
  3049. cls._model_classes__ = tuple(c for c in cls.mro() if getattr(c, 'pool', None) is None)
  3050. # 1. determine the proper fields of the model: the fields defined on the
  3051. # class and magic fields, not the inherited or custom ones
  3052. # retrieve fields from parent classes, and duplicate them on cls to
  3053. # avoid clashes with inheritance between different models
  3054. for name in cls._fields:
  3055. discardattr(cls, name)
  3056. cls._fields.clear()
  3057. # collect the definitions of each field (base definition + overrides)
  3058. definitions = defaultdict(list)
  3059. for klass in reversed(cls._model_classes__):
  3060. # this condition is an optimization of is_definition_class(klass)
  3061. if isinstance(klass, MetaModel):
  3062. for field in klass._field_definitions:
  3063. definitions[field.name].append(field)
  3064. for name, fields_ in definitions.items():
  3065. if f'{cls._name}.{name}' in cls.pool._database_translated_fields:
  3066. # the field is currently translated in the database; ensure the
  3067. # field is translated to avoid converting its column to varchar
  3068. # and losing data
  3069. translate = next((
  3070. field.args['translate'] for field in reversed(fields_) if 'translate' in field.args
  3071. ), False)
  3072. if not translate:
  3073. # patch the field definition by adding an override
  3074. _logger.debug("Patching %s.%s with translate=True", cls._name, name)
  3075. fields_.append(type(fields_[0])(translate=True))
  3076. if len(fields_) == 1 and fields_[0]._direct and fields_[0].model_name == cls._name:
  3077. cls._fields[name] = fields_[0]
  3078. else:
  3079. Field = type(fields_[-1])
  3080. self._add_field(name, Field(_base_fields=fields_))
  3081. # 2. add manual fields
  3082. if self.pool._init_modules:
  3083. self.env['ir.model.fields']._add_manual_fields(self)
  3084. # 3. make sure that parent models determine their own fields, then add
  3085. # inherited fields to cls
  3086. self._inherits_check()
  3087. for parent in self._inherits:
  3088. self.env[parent]._setup_base()
  3089. self._add_inherited_fields()
  3090. # 4. initialize more field metadata
  3091. cls._setup_done = True
  3092. for field in cls._fields.values():
  3093. field.prepare_setup()
  3094. # 5. determine and validate rec_name
  3095. if cls._rec_name:
  3096. assert cls._rec_name in cls._fields, \
  3097. "Invalid _rec_name=%r for model %r" % (cls._rec_name, cls._name)
  3098. elif 'name' in cls._fields:
  3099. cls._rec_name = 'name'
  3100. elif cls._custom and 'x_name' in cls._fields:
  3101. cls._rec_name = 'x_name'
  3102. # 6. determine and validate active_name
  3103. if cls._active_name:
  3104. assert (cls._active_name in cls._fields
  3105. and cls._active_name in ('active', 'x_active')), \
  3106. ("Invalid _active_name=%r for model %r; only 'active' and "
  3107. "'x_active' are supported and the field must be present on "
  3108. "the model") % (cls._active_name, cls._name)
  3109. elif 'active' in cls._fields:
  3110. cls._active_name = 'active'
  3111. elif 'x_active' in cls._fields:
  3112. cls._active_name = 'x_active'
  3113. @api.model
  3114. def _setup_fields(self):
  3115. """ Setup the fields, except for recomputation triggers. """
  3116. cls = self.env.registry[self._name]
  3117. # set up fields
  3118. bad_fields = []
  3119. many2one_company_dependents = self.env.registry.many2one_company_dependents
  3120. for name, field in cls._fields.items():
  3121. try:
  3122. field.setup(self)
  3123. except Exception:
  3124. if field.base_field.manual:
  3125. # Something goes wrong when setup a manual field.
  3126. # This can happen with related fields using another manual many2one field
  3127. # that hasn't been loaded because the comodel does not exist yet.
  3128. # This can also be a manual function field depending on not loaded fields yet.
  3129. bad_fields.append(name)
  3130. continue
  3131. raise
  3132. if field.type == 'many2one' and field.company_dependent:
  3133. many2one_company_dependents.add(field.comodel_name, field)
  3134. for name in bad_fields:
  3135. self._pop_field(name)
  3136. @api.model
  3137. def _setup_complete(self):
  3138. """ Setup recomputation triggers, and complete the model setup. """
  3139. cls = self.env.registry[self._name]
  3140. # register constraints and onchange methods
  3141. cls._init_constraints_onchanges()
  3142. @api.model
  3143. def fields_get(self, allfields=None, attributes=None):
  3144. """ fields_get([allfields][, attributes])
  3145. Return the definition of each field.
  3146. The returned value is a dictionary (indexed by field name) of
  3147. dictionaries. The _inherits'd fields are included. The string, help,
  3148. and selection (if present) attributes are translated.
  3149. :param list allfields: fields to document, all if empty or not provided
  3150. :param list attributes: attributes to return for each field, all if empty or not provided
  3151. :return: dictionary mapping field names to a dictionary mapping attributes to values.
  3152. :rtype: dict
  3153. """
  3154. res = {}
  3155. for fname, field in self._fields.items():
  3156. if allfields and fname not in allfields:
  3157. continue
  3158. if not field.is_accessible(self.env):
  3159. continue
  3160. description = field.get_description(self.env, attributes=attributes)
  3161. res[fname] = description
  3162. return res
  3163. @api.model
  3164. def check_field_access_rights(self, operation, field_names):
  3165. """Check the user access rights on the given fields.
  3166. :param str operation: one of ``create``, ``read``, ``write``, ``unlink``
  3167. :param field_names: names of the fields
  3168. :type field_names: list or None
  3169. :return: provided fields if fields is truthy (or the fields
  3170. readable by the current user).
  3171. :rtype: list
  3172. :raise AccessError: if the user is not allowed to access
  3173. the provided fields.
  3174. """
  3175. if self.env.su:
  3176. return field_names or list(self._fields)
  3177. if not field_names:
  3178. field_names = [name for name, field in self._fields.items() if field.is_accessible(self.env)]
  3179. else:
  3180. # Unknown (or virtual) fields are considered accessible because they will not be read and nothing will be written to them.
  3181. invalid_fields = [name for name in field_names if name in self._fields and not self._fields[name].is_accessible(self.env)]
  3182. if invalid_fields:
  3183. _logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
  3184. operation, self._uid, self._name, ', '.join(invalid_fields))
  3185. description = self.env['ir.model']._get(self._name).name
  3186. error_msg = _(
  3187. "You do not have enough rights to access the fields \"%(fields)s\""
  3188. " on %(document_kind)s (%(document_model)s). "
  3189. "Please contact your system administrator."
  3190. "\n\nOperation: %(operation)s",
  3191. fields=','.join(invalid_fields),
  3192. document_kind=description,
  3193. document_model=self._name,
  3194. operation=operation,
  3195. )
  3196. if self.env.user._has_group('base.group_no_one'):
  3197. def format_groups(field):
  3198. if field.groups == '.':
  3199. return _("always forbidden")
  3200. groups_list = [self.env.ref(g) for g in field.groups.split(',')]
  3201. groups = self.env['res.groups'].union(*groups_list).sorted('id')
  3202. return _(
  3203. "allowed for groups %s",
  3204. ', '.join(repr(g.display_name) for g in groups),
  3205. )
  3206. error_msg += _(
  3207. "\nUser: %(user)s"
  3208. "\nFields:"
  3209. "\n%(fields_list)s",
  3210. user=self._uid,
  3211. fields_list='\n'.join(
  3212. '- %s (%s)' % (f, format_groups(self._fields[f]))
  3213. for f in sorted(invalid_fields)
  3214. ),
  3215. )
  3216. raise AccessError(error_msg)
  3217. return field_names
  3218. @api.readonly
  3219. def read(self, fields=None, load='_classic_read') -> list[ValuesType]:
  3220. """ read([fields])
  3221. Read the requested fields for the records in ``self``, and return their
  3222. values as a list of dicts.
  3223. :param list fields: field names to return (default is all fields)
  3224. :param str load: loading mode, currently the only option is to set to
  3225. ``None`` to avoid loading the `display_name` of m2o fields
  3226. :return: a list of dictionaries mapping field names to their values,
  3227. with one dictionary per record
  3228. :rtype: list
  3229. :raise AccessError: if user is not allowed to access requested information
  3230. :raise ValueError: if a requested field does not exist
  3231. This is a high-level method that is not supposed to be overridden. In
  3232. order to modify how fields are read from database, see methods
  3233. :meth:`_fetch_query` and :meth:`_read_format`.
  3234. """
  3235. fields = self.check_field_access_rights('read', fields)
  3236. self._origin.fetch(fields)
  3237. return self._read_format(fnames=fields, load=load)
  3238. def update_field_translations(self, field_name, translations, source_lang=None):
  3239. """ Update the translations for a given field
  3240. See 'self._update_field_translations' docstring for details.
  3241. """
  3242. return self._update_field_translations(field_name, translations, source_lang=source_lang)
  3243. def _update_field_translations(self, field_name, translations, digest=None, source_lang=None):
  3244. """ Update the translations for a given field, with support for handling
  3245. old terms using an optional digest function.
  3246. :param str field_name: The name of the field to update.
  3247. :param dict translations: The translations to apply.
  3248. If `field.translate` is `True`, the dictionary should be in the format:
  3249. {lang: new_value}
  3250. where
  3251. new_value (str): The new translation for the specified language.
  3252. new_value (False): Removes the translation for the specified
  3253. language and falls back to the latest 'en_US' value.
  3254. If `field.translate` is a callable, the dictionary should be in the format:
  3255. {lang: {old_source_lang_term: new_term}} or
  3256. {lang: {digest(old_source_lang_term): new_term}} when `digest` is callable.
  3257. where
  3258. new_value (str): The new translation of old_term for the specified language.
  3259. new_value (False/''): Removes the translation for the specified
  3260. language and falls back to the old source_lang_term.
  3261. :param callable digest: An optional function to generate identifiers for old terms.
  3262. :param str source_lang: The language of old_source_lang_term in translations.
  3263. Defaults to 'en_US' if not specified.
  3264. """
  3265. self.ensure_one()
  3266. self.check_access('write')
  3267. self.check_field_access_rights('write', [field_name])
  3268. valid_langs = set(code for code, _ in self.env['res.lang'].get_installed()) | {'en_US'}
  3269. source_lang = source_lang or 'en_US'
  3270. missing_langs = (set(translations) | {source_lang}) - valid_langs
  3271. if missing_langs:
  3272. raise UserError(
  3273. _("The following languages are not activated: %(missing_names)s",
  3274. missing_names=', '.join(missing_langs))
  3275. )
  3276. field = self._fields[field_name]
  3277. if not field.translate:
  3278. return False # or raise error
  3279. if not field.store and not field.related and field.compute:
  3280. # a non-related non-stored computed field cannot be translated, even if it has inverse function
  3281. return False
  3282. # Strictly speaking, a translated related/computed field cannot be stored
  3283. # because the compute function only support one language
  3284. # `not field.store` is a redundant logic.
  3285. # But some developers store translated related fields.
  3286. # In these cases, only all translations of the first stored translation field will be updated
  3287. # For other stored related translated field, the translation for the flush language will be updated
  3288. if field.related and not field.store:
  3289. related_path, field_name = field.related.rsplit(".", 1)
  3290. return self.mapped(related_path)._update_field_translations(field_name, translations, digest)
  3291. if field.translate is True:
  3292. # falsy values (except emtpy str) are used to void the corresponding translation
  3293. if any(translation and not isinstance(translation, str) for translation in translations.values()):
  3294. raise UserError(_("Translations for model translated fields only accept falsy values and str"))
  3295. value_en = translations.get('en_US', True)
  3296. if not value_en and value_en != '':
  3297. translations.pop('en_US')
  3298. translations = {
  3299. lang: translation if isinstance(translation, str) else None
  3300. for lang, translation in translations.items()
  3301. }
  3302. if not translations:
  3303. return False
  3304. translation_fallback = translations['en_US'] if translations.get('en_US') is not None \
  3305. else translations[self.env.lang] if translations.get(self.env.lang) is not None \
  3306. else next((v for v in translations.values() if v is not None), None)
  3307. self.invalidate_recordset([field_name])
  3308. self._cr.execute(SQL(
  3309. """ UPDATE %(table)s
  3310. SET %(field)s = NULLIF(
  3311. jsonb_strip_nulls(%(fallback)s || COALESCE(%(field)s, '{}'::jsonb) || %(value)s),
  3312. '{}'::jsonb)
  3313. WHERE id = %(id)s
  3314. """,
  3315. table=SQL.identifier(self._table),
  3316. field=SQL.identifier(field_name),
  3317. fallback=Json({'en_US': translation_fallback}),
  3318. value=Json(translations),
  3319. id=self.id,
  3320. ))
  3321. self.modified([field_name])
  3322. else:
  3323. old_values = field._get_stored_translations(self)
  3324. if not old_values:
  3325. return False
  3326. for lang in translations:
  3327. # for languages to be updated, use the unconfirmed translated value to replace the language value
  3328. if f'_{lang}' in old_values:
  3329. old_values[lang] = old_values.pop(f'_{lang}')
  3330. translations = {lang: _translations for lang, _translations in translations.items() if _translations}
  3331. old_source_lang_value = old_values[next(
  3332. lang
  3333. for lang in [f'_{source_lang}', source_lang, '_en_US', 'en_US']
  3334. if lang in old_values)]
  3335. old_values_to_translate = {
  3336. lang: value
  3337. for lang, value in old_values.items()
  3338. if lang != source_lang and lang in translations
  3339. }
  3340. old_translation_dictionary = field.get_translation_dictionary(old_source_lang_value, old_values_to_translate)
  3341. if digest:
  3342. # replace digested old_en_term with real old_en_term
  3343. digested2term = {
  3344. digest(old_en_term): old_en_term
  3345. for old_en_term in old_translation_dictionary
  3346. }
  3347. translations = {
  3348. lang: {
  3349. digested2term[src]: value
  3350. for src, value in lang_translations.items()
  3351. if src in digested2term
  3352. }
  3353. for lang, lang_translations in translations.items()
  3354. }
  3355. new_values = old_values
  3356. for lang, _translations in translations.items():
  3357. _old_translations = {src: values[lang] for src, values in old_translation_dictionary.items() if lang in values}
  3358. _new_translations = {**_old_translations, **_translations}
  3359. new_values[lang] = field.translate(_new_translations.get, old_source_lang_value)
  3360. self.env.cache.update_raw(self, field, [new_values], dirty=True)
  3361. # the following write is incharge of
  3362. # 1. mark field as modified
  3363. # 2. execute logics in the override `write` method
  3364. # 3. update write_date of the record if exists to support 't-cache'
  3365. # even if the value in cache is the same as the value written
  3366. self[field_name] = self[field_name]
  3367. return True
  3368. def get_field_translations(self, field_name, langs=None):
  3369. """ get model/model_term translations for records
  3370. :param str field_name: field name
  3371. :param list langs: languages
  3372. :return: (translations, context) where
  3373. translations: list of dicts like [{"lang": lang, "source": source_term, "value": value_term}]
  3374. context: {"translation_type": "text"/"char", "translation_show_source": True/False}
  3375. """
  3376. self.ensure_one()
  3377. field = self._fields[field_name]
  3378. # We don't forbid reading inactive/non-existing languages,
  3379. langs = set(langs or [l[0] for l in self.env['res.lang'].get_installed()])
  3380. self_lang = self.with_context(check_translations=True, prefetch_langs=True)
  3381. val_en = self_lang.with_context(lang='en_US')[field_name]
  3382. if not field.translate:
  3383. translations = []
  3384. elif field.translate is True:
  3385. translations = [{
  3386. 'lang': lang,
  3387. 'source': val_en,
  3388. 'value': self_lang.with_context(lang=lang)[field_name]
  3389. } for lang in langs]
  3390. else:
  3391. translation_dictionary = field.get_translation_dictionary(
  3392. val_en, {lang: self_lang.with_context(lang=lang)[field_name] for lang in langs}
  3393. )
  3394. translations = [{
  3395. 'lang': lang,
  3396. 'source': term_en,
  3397. 'value': term_lang if term_lang != term_en else ''
  3398. } for term_en, translations in translation_dictionary.items()
  3399. for lang, term_lang in translations.items()]
  3400. context = {}
  3401. context['translation_type'] = 'text' if field.type in ['text', 'html'] else 'char'
  3402. context['translation_show_source'] = callable(field.translate)
  3403. return translations, context
  3404. def _get_base_lang(self):
  3405. """ Returns the base language of the record. """
  3406. self.ensure_one()
  3407. return 'en_US'
  3408. def _read_format(self, fnames, load='_classic_read'):
  3409. """Returns a list of dictionaries mapping field names to their values,
  3410. with one dictionary per record that exists.
  3411. The output format is the one expected from the `read` method, which uses
  3412. this method as its implementation for formatting values.
  3413. For the properties fields, call convert_to_read_multi instead of convert_to_read
  3414. to prepare everything (record existences, display name, etc) in batch.
  3415. The current method is different from `read` because it retrieves its
  3416. values from the cache without doing a query when it is avoidable.
  3417. """
  3418. data = [(record, {'id': record.id}) for record in self]
  3419. use_display_name = (load == '_classic_read')
  3420. for name in fnames:
  3421. field = self._fields[name]
  3422. if field.type == 'properties':
  3423. values_list = []
  3424. records = []
  3425. for record, vals in data:
  3426. try:
  3427. values_list.append(record[name])
  3428. records.append(record.id)
  3429. except MissingError:
  3430. vals.clear()
  3431. results = field.convert_to_read_multi(values_list, self.browse(records))
  3432. for record_read_vals, convert_result in zip(data, results):
  3433. record_read_vals[1][name] = convert_result
  3434. continue
  3435. convert = field.convert_to_read
  3436. for record, vals in data:
  3437. # missing records have their vals empty
  3438. if not vals:
  3439. continue
  3440. try:
  3441. vals[name] = convert(record[name], record, use_display_name)
  3442. except MissingError:
  3443. vals.clear()
  3444. result = [vals for record, vals in data if vals]
  3445. return result
  3446. def _fetch_field(self, field):
  3447. """ Read from the database in order to fetch ``field`` (:class:`Field`
  3448. instance) for ``self`` in cache.
  3449. """
  3450. self.check_field_access_rights('read', [field.name])
  3451. # determine which fields can be prefetched
  3452. if self._context.get('prefetch_fields', True) and field.prefetch:
  3453. fnames = [
  3454. name
  3455. for name, f in self._fields.items()
  3456. # select fields with the same prefetch group
  3457. if f.prefetch == field.prefetch
  3458. # discard fields with groups that the user may not access
  3459. if f.is_accessible(self.env)
  3460. ]
  3461. if field.name not in fnames:
  3462. fnames.append(field.name)
  3463. else:
  3464. fnames = [field.name]
  3465. self.fetch(fnames)
  3466. def fetch(self, field_names):
  3467. """ Make sure the given fields are in memory for the records in ``self``,
  3468. by fetching what is necessary from the database. Non-stored fields are
  3469. mostly ignored, except for their stored dependencies. This method should
  3470. be called to optimize code.
  3471. :param field_names: a collection of field names to fetch
  3472. :raise AccessError: if user is not allowed to access requested information
  3473. This method is implemented thanks to methods :meth:`_search` and
  3474. :meth:`_fetch_query`, and should not be overridden.
  3475. """
  3476. if not self or not field_names:
  3477. return
  3478. fields_to_fetch = self._determine_fields_to_fetch(field_names, ignore_when_in_cache=True)
  3479. # first determine a query that satisfies the domain and access rules
  3480. if any(field.column_type for field in fields_to_fetch):
  3481. query = self.with_context(active_test=False)._search([('id', 'in', self.ids)])
  3482. else:
  3483. try:
  3484. self.check_access('read')
  3485. except MissingError:
  3486. # Method fetch() should never raise a MissingError, but method
  3487. # check_access() can, because it must read fields on self.
  3488. # So we restrict 'self' to existing records (to avoid an extra
  3489. # exists() at the end of the method).
  3490. self = self.exists()
  3491. self.check_access('read')
  3492. if not fields_to_fetch:
  3493. return
  3494. query = self._as_query(ordered=False)
  3495. # fetch the fields
  3496. fetched = self._fetch_query(query, fields_to_fetch)
  3497. # possibly raise exception for the records that could not be read
  3498. if fetched != self:
  3499. forbidden = (self - fetched).exists()
  3500. if forbidden:
  3501. raise self.env['ir.rule']._make_access_error('read', forbidden)
  3502. def _determine_fields_to_fetch(self, field_names, ignore_when_in_cache=False) -> list[Field]:
  3503. """
  3504. Return the fields to fetch from database among the given field names,
  3505. and following the dependencies of computed fields. The method is used
  3506. by :meth:`fetch` and :meth:`search_fetch`.
  3507. :param field_names: the list of fields requested
  3508. :param ignore_when_in_cache: whether to ignore fields that are alreay in cache for ``self``
  3509. :return: the list of fields that must be fetched
  3510. """
  3511. if not field_names:
  3512. return []
  3513. cache = self.env.cache
  3514. fields_to_fetch = []
  3515. field_names_todo = deque(self.check_field_access_rights('read', field_names))
  3516. field_names_done = {'id'} # trick: ignore 'id'
  3517. while field_names_todo:
  3518. field_name = field_names_todo.popleft()
  3519. if field_name in field_names_done:
  3520. continue
  3521. field_names_done.add(field_name)
  3522. field = self._fields.get(field_name)
  3523. if not field:
  3524. raise ValueError(f"Invalid field {field_name!r} on model {self._name!r}")
  3525. if ignore_when_in_cache and not any(cache.get_missing_ids(self, field)):
  3526. # field is already in cache: don't fetch it
  3527. continue
  3528. if field.store:
  3529. fields_to_fetch.append(field)
  3530. else:
  3531. # optimization: fetch field dependencies
  3532. for dotname in self.pool.field_depends[field]:
  3533. dep_field = self._fields[dotname.split('.', 1)[0]]
  3534. if (not dep_field.store) or (dep_field.prefetch is True and
  3535. dep_field.is_accessible(self.env)
  3536. ):
  3537. field_names_todo.append(dep_field.name)
  3538. return fields_to_fetch
  3539. def _fetch_query(self, query, fields):
  3540. """ Fetch the given fields (iterable of :class:`Field` instances) from
  3541. the given query, put them in cache, and return the fetched records.
  3542. This method may be overridden to change what fields to actually fetch,
  3543. or to change the values that are put in cache.
  3544. """
  3545. # determine columns fields and those with their own read() method
  3546. column_fields = OrderedSet()
  3547. other_fields = OrderedSet()
  3548. for field in fields:
  3549. if field.name == 'id':
  3550. continue
  3551. assert field.store
  3552. (column_fields if field.column_type else other_fields).add(field)
  3553. context = self.env.context
  3554. if column_fields:
  3555. # the query may involve several tables: we need fully-qualified names
  3556. sql_terms = [SQL.identifier(self._table, 'id')]
  3557. for field in column_fields:
  3558. if field.type == 'binary' and (
  3559. context.get('bin_size') or context.get('bin_size_' + field.name)):
  3560. # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
  3561. sql = self._field_to_sql(self._table, field.name, query)
  3562. sql = SQL("pg_size_pretty(length(%s)::bigint)", sql)
  3563. elif field.translate and self.env.context.get('prefetch_langs'):
  3564. sql = SQL.identifier(self._table, field.name, to_flush=field)
  3565. else:
  3566. # flushing is necessary to retrieve the en_US value of fields without a translation
  3567. sql = self._field_to_sql(self._table, field.name, query, flush=field.translate)
  3568. sql_terms.append(sql)
  3569. # select the given columns from the rows in the query
  3570. rows = self.env.execute_query(query.select(*sql_terms))
  3571. if not rows:
  3572. return self.browse()
  3573. # rows = [(id1, a1, b1), (id2, a2, b2), ...]
  3574. # column_values = [(id1, id2, ...), (a1, a2, ...), (b1, b2, ...)]
  3575. column_values = zip(*rows)
  3576. ids = next(column_values)
  3577. fetched = self.browse(ids)
  3578. # If we assume that the value of a pending update is in cache, we
  3579. # can avoid flushing pending updates if the fetched values do not
  3580. # overwrite values in cache.
  3581. for field in column_fields:
  3582. values = next(column_values)
  3583. # store values in cache, but without overwriting
  3584. self.env.cache.insert_missing(fetched, field, values)
  3585. else:
  3586. fetched = self.browse(query)
  3587. # process non-column fields
  3588. if fetched:
  3589. for field in other_fields:
  3590. field.read(fetched)
  3591. return fetched
  3592. def get_metadata(self):
  3593. """Return some metadata about the given records.
  3594. :return: list of ownership dictionaries for each requested record
  3595. :rtype: list of dictionaries with the following keys:
  3596. * id: object id
  3597. * create_uid: user who created the record
  3598. * create_date: date when the record was created
  3599. * write_uid: last user who changed the record
  3600. * write_date: date of the last change to the record
  3601. * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
  3602. * xmlids: list of dict with xmlid in format ``module.name``, and noupdate as boolean
  3603. * noupdate: A boolean telling if the record will be updated or not
  3604. """
  3605. IrModelData = self.env['ir.model.data'].sudo()
  3606. if self._log_access:
  3607. res = self.read(LOG_ACCESS_COLUMNS)
  3608. else:
  3609. res = [{'id': x} for x in self.ids]
  3610. xml_data = defaultdict(list)
  3611. imds = IrModelData.search_read(
  3612. [('model', '=', self._name), ('res_id', 'in', self.ids)],
  3613. ['res_id', 'noupdate', 'module', 'name'],
  3614. order='id DESC'
  3615. )
  3616. for imd in imds:
  3617. xml_data[imd['res_id']].append({
  3618. 'xmlid': "%s.%s" % (imd['module'], imd['name']),
  3619. 'noupdate': imd['noupdate'],
  3620. })
  3621. for r in res:
  3622. main = xml_data.get(r['id'], [{}])[-1]
  3623. r['xmlid'] = main.get('xmlid', False)
  3624. r['noupdate'] = main.get('noupdate', False)
  3625. r['xmlids'] = xml_data.get(r['id'], [])[::-1]
  3626. return res
  3627. def get_base_url(self):
  3628. """ Return rooturl for a specific record.
  3629. By default, it returns the ir.config.parameter of base_url
  3630. but it can be overridden by model.
  3631. :return: the base url for this record
  3632. :rtype: str
  3633. """
  3634. if len(self) > 1:
  3635. raise ValueError("Expected singleton or no record: %s" % self)
  3636. return self.env['ir.config_parameter'].sudo().get_param('web.base.url')
  3637. def _check_company_domain(self, companies):
  3638. """Domain to be used for company consistency between records regarding this model.
  3639. :param companies: the allowed companies for the related record
  3640. :type companies: BaseModel or list or tuple or int or unquote
  3641. """
  3642. if not companies:
  3643. return [('company_id', '=', False)]
  3644. if isinstance(companies, unquote):
  3645. return [('company_id', 'in', unquote(f'{companies} + [False]'))]
  3646. return [('company_id', 'in', to_company_ids(companies) + [False])]
  3647. def _check_company(self, fnames=None):
  3648. """ Check the companies of the values of the given field names.
  3649. :param list fnames: names of relational fields to check
  3650. :raises UserError: if the `company_id` of the value of any field is not
  3651. in `[False, self.company_id]` (or `self` if
  3652. :class:`~odoo.addons.base.models.res_company`).
  3653. For :class:`~odoo.addons.base.models.res_users` relational fields,
  3654. verifies record company is in `company_ids` fields.
  3655. User with main company A, having access to company A and B, could be
  3656. assigned or linked to records in company B.
  3657. """
  3658. if fnames is None or {'company_id', 'company_ids'} & set(fnames):
  3659. fnames = self._fields
  3660. regular_fields = []
  3661. property_fields = []
  3662. for name in fnames:
  3663. field = self._fields[name]
  3664. if field.relational and field.check_company:
  3665. if not field.company_dependent:
  3666. regular_fields.append(name)
  3667. else:
  3668. property_fields.append(name)
  3669. if not (regular_fields or property_fields):
  3670. return
  3671. inconsistencies = []
  3672. for record in self:
  3673. # The first part of the check verifies that all records linked via relation fields are compatible
  3674. # with the company of the origin document, i.e. `self.account_id.company_id == self.company_id`
  3675. if regular_fields:
  3676. if self._name == 'res.company':
  3677. companies = record
  3678. elif 'company_id' in self:
  3679. companies = record.company_id
  3680. elif 'company_ids' in self:
  3681. companies = record.company_ids
  3682. else:
  3683. _logger.warning(_(
  3684. "Skipping a company check for model %(model_name)s. Its fields %(field_names)s are set as company-dependent, "
  3685. "but the model doesn't have a `company_id` or `company_ids` field!",
  3686. model_name=self._name, field_names=regular_fields
  3687. ))
  3688. continue
  3689. for name in regular_fields:
  3690. corecords = record.sudo()[name]
  3691. if corecords:
  3692. domain = corecords._check_company_domain(companies) # pylint: disable=0601
  3693. if domain and corecords != corecords.with_context(active_test=False).filtered_domain(domain):
  3694. inconsistencies.append((record, name, corecords))
  3695. # The second part of the check (for property / company-dependent fields) verifies that the records
  3696. # linked via those relation fields are compatible with the company that owns the property value, i.e.
  3697. # the company for which the value is being assigned, i.e:
  3698. # `self.property_account_payable_id.company_id == self.env.company
  3699. company = self.env.company
  3700. for name in property_fields:
  3701. corecords = record.sudo()[name]
  3702. if corecords:
  3703. domain = corecords._check_company_domain(company)
  3704. if domain and corecords != corecords.with_context(active_test=False).filtered_domain(domain):
  3705. inconsistencies.append((record, name, corecords))
  3706. if inconsistencies:
  3707. lines = [_("Incompatible companies on records:")]
  3708. company_msg = _lt("- Record is company “%(company)s” and “%(field)s” (%(fname)s: %(values)s) belongs to another company.")
  3709. record_msg = _lt("- “%(record)s” belongs to company “%(company)s” and “%(field)s” (%(fname)s: %(values)s) belongs to another company.")
  3710. root_company_msg = _lt("- Only a root company can be set on “%(record)s”. Currently set to “%(company)s”")
  3711. for record, name, corecords in inconsistencies[:5]:
  3712. if record._name == 'res.company':
  3713. msg, company = company_msg, record
  3714. elif record == corecords and name == 'company_id':
  3715. msg, company = root_company_msg, record.company_id
  3716. else:
  3717. msg, company = record_msg, record.company_id
  3718. field = self.env['ir.model.fields']._get(self._name, name)
  3719. lines.append(str(msg) % {
  3720. 'record': record.display_name,
  3721. 'company': company.display_name,
  3722. 'field': field.field_description,
  3723. 'fname': field.name,
  3724. 'values': ", ".join(repr(rec.display_name) for rec in corecords),
  3725. })
  3726. raise UserError("\n".join(lines))
  3727. def check_access(self, operation: str) -> None:
  3728. """ Verify that the current user is allowed to perform ``operation`` on
  3729. all the records in ``self``. The method raises an :class:`AccessError`
  3730. if the operation is forbidden on the model in general, or on any record
  3731. in ``self``.
  3732. In particular, when ``self`` is empty, the method checks whether the
  3733. current user has some permission to perform ``operation`` on the model
  3734. in general::
  3735. # check that user has some minimal permission on the model
  3736. records.browse().check_access(operation)
  3737. """
  3738. if not self.env.su and (result := self._check_access(operation)):
  3739. raise result[1]()
  3740. def has_access(self, operation: str) -> bool:
  3741. """ Return whether the current user is allowed to perform ``operation``
  3742. on all the records in ``self``. The method is fully consistent with
  3743. method :meth:`check_access` but returns a boolean instead.
  3744. """
  3745. return self.env.su or not self._check_access(operation)
  3746. def _filtered_access(self, operation: str):
  3747. """ Return the subset of ``self`` for which the current user is allowed
  3748. to perform ``operation``. The method is fully equivalent to::
  3749. self.filtered(lambda record: record.has_access(operation))
  3750. """
  3751. if self and not self.env.su and (result := self._check_access(operation)):
  3752. return self - result[0]
  3753. return self
  3754. def _check_access(self, operation: str) -> tuple[Self, Callable] | None:
  3755. """ Return ``None`` if the current user has permission to perform
  3756. ``operation`` on the records ``self``. Otherwise, return a pair
  3757. ``(records, function)`` where ``records`` are the forbidden records, and
  3758. ``function`` can be used to create some corresponding exception.
  3759. This method provides the base implementation of
  3760. methods :meth:`check_access`, :meth:`has_access`
  3761. and :meth:`_filtered_access`. The method may be overridden in order to
  3762. restrict the access to ``self``.
  3763. """
  3764. Access = self.env['ir.model.access']
  3765. if not Access.check(self._name, operation, raise_exception=False):
  3766. return self, functools.partial(Access._make_access_error, self._name, operation)
  3767. # we only check access rules on real records, which should not be mixed
  3768. # with new records
  3769. if any(self._ids):
  3770. Rule = self.env['ir.rule']
  3771. domain = Rule._compute_domain(self._name, operation)
  3772. if domain and (forbidden := self - self.sudo().filtered_domain(domain)):
  3773. return forbidden, functools.partial(Rule._make_access_error, operation, forbidden)
  3774. return None
  3775. @api.model
  3776. def check_access_rights(self, operation, raise_exception=True):
  3777. """ Verify that the given operation is allowed for the current user accord to ir.model.access.
  3778. :param str operation: one of ``create``, ``read``, ``write``, ``unlink``
  3779. :param bool raise_exception: whether an exception should be raise if operation is forbidden
  3780. :return: whether the operation is allowed
  3781. :rtype: bool
  3782. :raise AccessError: if the operation is forbidden and raise_exception is True
  3783. """
  3784. warnings.warn(
  3785. "check_access_rights() is deprecated since 18.0; use check_access() instead.",
  3786. DeprecationWarning, 1,
  3787. )
  3788. if raise_exception:
  3789. return self.browse().check_access(operation)
  3790. return self.browse().has_access(operation)
  3791. def check_access_rule(self, operation):
  3792. """ Verify that the given operation is allowed for the current user according to ir.rules.
  3793. :param str operation: one of ``create``, ``read``, ``write``, ``unlink``
  3794. :return: None if the operation is allowed
  3795. :raise UserError: if current ``ir.rules`` do not permit this operation.
  3796. """
  3797. warnings.warn(
  3798. "check_access_rule() is deprecated since 18.0; use check_access() instead.",
  3799. DeprecationWarning, 1,
  3800. )
  3801. self.check_access(operation)
  3802. def _filter_access_rules(self, operation):
  3803. """ Return the subset of ``self`` for which ``operation`` is allowed. """
  3804. warnings.warn(
  3805. "_filter_access_rules() is deprecated since 18.0; use _filtered_access() instead.",
  3806. DeprecationWarning, 1,
  3807. )
  3808. return self._filtered_access(operation)
  3809. def _filter_access_rules_python(self, operation):
  3810. warnings.warn(
  3811. "_filter_access_rules_python() is deprecated since 18.0; use _filtered_access() instead.",
  3812. DeprecationWarning, 1,
  3813. )
  3814. return self._filtered_access(operation)
  3815. def unlink(self):
  3816. """ unlink()
  3817. Deletes the records in ``self``.
  3818. :raise AccessError: if the user is not allowed to delete all the given records
  3819. :raise UserError: if the record is default property for other records
  3820. """
  3821. if not self:
  3822. return True
  3823. self.check_access('unlink')
  3824. from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
  3825. for func in self._ondelete_methods:
  3826. # func._ondelete is True if it should be called during uninstallation
  3827. if func._ondelete or not self._context.get(MODULE_UNINSTALL_FLAG):
  3828. func(self)
  3829. # TOFIX: this avoids an infinite loop when trying to recompute a
  3830. # field, which triggers the recomputation of another field using the
  3831. # same compute function, which then triggers again the computation
  3832. # of those two fields
  3833. for field in self._fields.values():
  3834. self.env.remove_to_compute(field, self)
  3835. self.env.flush_all()
  3836. cr = self._cr
  3837. Data = self.env['ir.model.data'].sudo().with_context({})
  3838. Defaults = self.env['ir.default'].sudo()
  3839. Attachment = self.env['ir.attachment'].sudo()
  3840. ir_model_data_unlink = Data
  3841. ir_attachment_unlink = Attachment
  3842. # mark fields that depend on 'self' to recompute them after 'self' has
  3843. # been deleted (like updating a sum of lines after deleting one line)
  3844. with self.env.protecting(self._fields.values(), self):
  3845. self.modified(self._fields, before=True)
  3846. for sub_ids in cr.split_for_in_conditions(self.ids):
  3847. records = self.browse(sub_ids)
  3848. cr.execute(SQL(
  3849. "DELETE FROM %s WHERE id IN %s",
  3850. SQL.identifier(self._table), sub_ids,
  3851. ))
  3852. # Removing the ir_model_data reference if the record being deleted
  3853. # is a record created by xml/csv file, as these are not connected
  3854. # with real database foreign keys, and would be dangling references.
  3855. #
  3856. # Note: the following steps are performed as superuser to avoid
  3857. # access rights restrictions, and with no context to avoid possible
  3858. # side-effects during admin calls.
  3859. data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])
  3860. ir_model_data_unlink |= data
  3861. # For the same reason, remove the relevant records in ir_attachment
  3862. # (the search is performed with sql as the search method of
  3863. # ir_attachment is overridden to hide attachments of deleted
  3864. # records)
  3865. cr.execute(SQL(
  3866. "SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s",
  3867. self._name, sub_ids,
  3868. ))
  3869. ir_attachment_unlink |= Attachment.browse(row[0] for row in cr.fetchall())
  3870. # don't allow fallback value in ir.default for many2one company dependent fields to be deleted
  3871. # Exception: when MODULE_UNINSTALL_FLAG, these fallbacks can be deleted by Defaults.discard_records(records)
  3872. if (many2one_fields := self.env.registry.many2one_company_dependents[self._name]) and not self._context.get(MODULE_UNINSTALL_FLAG):
  3873. IrModelFields = self.env["ir.model.fields"]
  3874. field_ids = tuple(IrModelFields._get_ids(field.model_name).get(field.name) for field in many2one_fields)
  3875. sub_ids_json_text = tuple(json.dumps(id_) for id_ in sub_ids)
  3876. if default := Defaults.search([('field_id', 'in', field_ids), ('json_value', 'in', sub_ids_json_text)], limit=1, order='id desc'):
  3877. ir_field = default.field_id.sudo()
  3878. field = self.env[ir_field.model]._fields[ir_field.name]
  3879. record = self.browse(json.loads(default.json_value))
  3880. raise UserError(_('Unable to delete %(record)s because it is used as the default value of %(field)s', record=record, field=field))
  3881. # on delete set null/restrict for jsonb company dependent many2one
  3882. for field in many2one_fields:
  3883. model = self.env[field.model_name]
  3884. if field.ondelete == 'restrict' and not self._context.get(MODULE_UNINSTALL_FLAG):
  3885. if res := self.env.execute_query(SQL(
  3886. """
  3887. SELECT id, %(field)s
  3888. FROM %(table)s
  3889. WHERE %(field)s IS NOT NULL
  3890. AND %(field)s @? %(jsonpath)s
  3891. ORDER BY id
  3892. LIMIT 1
  3893. """,
  3894. table=SQL.identifier(model._table),
  3895. field=SQL.identifier(field.name),
  3896. jsonpath=f"$.* ? ({' || '.join(f'@ == {id_}' for id_ in sub_ids)})",
  3897. )):
  3898. on_restrict_id, field_json = res[0]
  3899. to_delete_id = next(iter(id_ for id_ in field_json.values()))
  3900. on_restrict_record = model.browse(on_restrict_id)
  3901. to_delete_record = self.browse(to_delete_id)
  3902. raise UserError(_('You cannot delete %(to_delete_record)s, as it is used by %(on_restrict_record)s',
  3903. to_delete_record=to_delete_record, on_restrict_record=on_restrict_record))
  3904. else:
  3905. self.env.execute_query(SQL(
  3906. """
  3907. UPDATE %(table)s
  3908. SET %(field)s = (
  3909. SELECT jsonb_object_agg(
  3910. key,
  3911. CASE
  3912. WHEN value::int4 in %(ids)s THEN NULL
  3913. ELSE value::int4
  3914. END)
  3915. FROM jsonb_each_text(%(field)s)
  3916. )
  3917. WHERE %(field)s IS NOT NULL
  3918. AND %(field)s @? %(jsonpath)s
  3919. """,
  3920. table=SQL.identifier(model._table),
  3921. field=SQL.identifier(field.name),
  3922. ids=sub_ids,
  3923. jsonpath=f"$.* ? ({' || '.join(f'@ == {id_}' for id_ in sub_ids)})",
  3924. ))
  3925. # For the same reason, remove the defaults having some of the
  3926. # records as value
  3927. Defaults.discard_records(records)
  3928. # invalidate the *whole* cache, since the orm does not handle all
  3929. # changes made in the database, like cascading delete!
  3930. self.env.invalidate_all(flush=False)
  3931. if ir_model_data_unlink:
  3932. ir_model_data_unlink.unlink()
  3933. if ir_attachment_unlink:
  3934. ir_attachment_unlink.unlink()
  3935. # auditing: deletions are infrequent and leave no trace in the database
  3936. _unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)
  3937. return True
  3938. def write(self, vals: ValuesType) -> typing.Literal[True]:
  3939. """ write(vals)
  3940. Updates all records in ``self`` with the provided values.
  3941. :param dict vals: fields to update and the value to set on them
  3942. :raise AccessError: if user is not allowed to modify the specified records/fields
  3943. :raise ValidationError: if invalid values are specified for selection fields
  3944. :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation (such as setting an object as its own parent)
  3945. * For numeric fields (:class:`~odoo.fields.Integer`,
  3946. :class:`~odoo.fields.Float`) the value should be of the
  3947. corresponding type
  3948. * For :class:`~odoo.fields.Boolean`, the value should be a
  3949. :class:`python:bool`
  3950. * For :class:`~odoo.fields.Selection`, the value should match the
  3951. selection values (generally :class:`python:str`, sometimes
  3952. :class:`python:int`)
  3953. * For :class:`~odoo.fields.Many2one`, the value should be the
  3954. database identifier of the record to set
  3955. * The expected value of a :class:`~odoo.fields.One2many` or
  3956. :class:`~odoo.fields.Many2many` relational field is a list of
  3957. :class:`~odoo.fields.Command` that manipulate the relation the
  3958. implement. There are a total of 7 commands:
  3959. :meth:`~odoo.fields.Command.create`,
  3960. :meth:`~odoo.fields.Command.update`,
  3961. :meth:`~odoo.fields.Command.delete`,
  3962. :meth:`~odoo.fields.Command.unlink`,
  3963. :meth:`~odoo.fields.Command.link`,
  3964. :meth:`~odoo.fields.Command.clear`, and
  3965. :meth:`~odoo.fields.Command.set`.
  3966. * For :class:`~odoo.fields.Date` and `~odoo.fields.Datetime`,
  3967. the value should be either a date(time), or a string.
  3968. .. warning::
  3969. If a string is provided for Date(time) fields,
  3970. it must be UTC-only and formatted according to
  3971. :const:`odoo.tools.misc.DEFAULT_SERVER_DATE_FORMAT` and
  3972. :const:`odoo.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT`
  3973. * Other non-relational fields use a string for value
  3974. """
  3975. if not self:
  3976. return True
  3977. self.check_access('write')
  3978. self.check_field_access_rights('write', vals.keys())
  3979. env = self.env
  3980. bad_names = {'id', 'parent_path'}
  3981. if self._log_access:
  3982. # the superuser can set log_access fields while loading registry
  3983. if not(self.env.uid == SUPERUSER_ID and not self.pool.ready):
  3984. bad_names.update(LOG_ACCESS_COLUMNS)
  3985. # set magic fields
  3986. vals = {key: val for key, val in vals.items() if key not in bad_names}
  3987. if self._log_access:
  3988. vals.setdefault('write_uid', self.env.uid)
  3989. vals.setdefault('write_date', self.env.cr.now())
  3990. field_values = [] # [(field, value)]
  3991. determine_inverses = defaultdict(list) # {inverse: fields}
  3992. fnames_modifying_relations = []
  3993. protected = set()
  3994. check_company = False
  3995. for fname, value in vals.items():
  3996. field = self._fields.get(fname)
  3997. if not field:
  3998. raise ValueError("Invalid field %r on model %r" % (fname, self._name))
  3999. field_values.append((field, value))
  4000. if field.inverse:
  4001. if field.type in ('one2many', 'many2many'):
  4002. # The written value is a list of commands that must applied
  4003. # on the field's current value. Because the field is
  4004. # protected while being written, the field's current value
  4005. # will not be computed and default to an empty recordset. So
  4006. # make sure the field's value is in cache before writing, in
  4007. # order to avoid an inconsistent update.
  4008. self[fname]
  4009. determine_inverses[field.inverse].append(field)
  4010. if self.pool.is_modifying_relations(field):
  4011. fnames_modifying_relations.append(fname)
  4012. if field.inverse or (field.compute and not field.readonly):
  4013. if field.store or field.type not in ('one2many', 'many2many'):
  4014. # Protect the field from being recomputed while being
  4015. # inversed. In the case of non-stored x2many fields, the
  4016. # field's value may contain unexpeced new records (created
  4017. # by command 0). Those new records are necessary for
  4018. # inversing the field, but should no longer appear if the
  4019. # field is recomputed afterwards. Not protecting the field
  4020. # will automatically invalidate the field from the cache,
  4021. # forcing its value to be recomputed once dependencies are
  4022. # up-to-date.
  4023. protected.update(self.pool.field_computed.get(field, [field]))
  4024. if fname == 'company_id' or (field.relational and field.check_company):
  4025. check_company = True
  4026. # force the computation of fields that are computed with some assigned
  4027. # fields, but are not assigned themselves
  4028. to_compute = [field.name
  4029. for field in protected
  4030. if field.compute and field.name not in vals]
  4031. if to_compute:
  4032. self._recompute_recordset(to_compute)
  4033. # protect fields being written against recomputation
  4034. with env.protecting(protected, self):
  4035. # Determine records depending on values. When modifying a relational
  4036. # field, you have to recompute what depends on the field's values
  4037. # before and after modification. This is because the modification
  4038. # has an impact on the "data path" between a computed field and its
  4039. # dependency. Note that this double call to modified() is only
  4040. # necessary for relational fields.
  4041. #
  4042. # It is best explained with a simple example: consider two sales
  4043. # orders SO1 and SO2. The computed total amount on sales orders
  4044. # indirectly depends on the many2one field 'order_id' linking lines
  4045. # to their sales order. Now consider the following code:
  4046. #
  4047. # line = so1.line_ids[0] # pick a line from SO1
  4048. # line.order_id = so2 # move the line to SO2
  4049. #
  4050. # In this situation, the total amount must be recomputed on *both*
  4051. # sales order: the line's order before the modification, and the
  4052. # line's order after the modification.
  4053. self.modified(fnames_modifying_relations, before=True)
  4054. real_recs = self.filtered('id')
  4055. # field.write_sequence determines a priority for writing on fields.
  4056. # Monetary fields need their corresponding currency field in cache
  4057. # for rounding values. X2many fields must be written last, because
  4058. # they flush other fields when deleting lines.
  4059. for field, value in sorted(field_values, key=lambda item: item[0].write_sequence):
  4060. field.write(self, value)
  4061. # determine records depending on new values
  4062. #
  4063. # Call modified after write, because the modified can trigger a
  4064. # search which can trigger a flush which can trigger a recompute
  4065. # which remove the field from the recompute list while all the
  4066. # values required for the computation could not be yet in cache.
  4067. # e.g. Write on `name` of `res.partner` trigger the recompute of
  4068. # `display_name`, which triggers a search on child_ids to find the
  4069. # childs to which the display_name must be recomputed, which
  4070. # triggers the flush of `display_name` because the _order of
  4071. # res.partner includes display_name. The computation of display_name
  4072. # is then done too soon because the parent_id was not yet written.
  4073. # (`test_01_website_reset_password_tour`)
  4074. self.modified(vals)
  4075. if self._parent_store and self._parent_name in vals:
  4076. self.flush_model([self._parent_name])
  4077. # validate non-inversed fields first
  4078. inverse_fields = [f.name for fs in determine_inverses.values() for f in fs]
  4079. real_recs._validate_fields(vals, inverse_fields)
  4080. for fields in determine_inverses.values():
  4081. # write again on non-stored fields that have been invalidated from cache
  4082. for field in fields:
  4083. if not field.store and any(self.env.cache.get_missing_ids(real_recs, field)):
  4084. field.write(real_recs, vals[field.name])
  4085. # inverse records that are not being computed
  4086. try:
  4087. fields[0].determine_inverse(real_recs)
  4088. except AccessError as e:
  4089. if fields[0].inherited:
  4090. description = self.env['ir.model']._get(self._name).name
  4091. raise AccessError(_(
  4092. "%(previous_message)s\n\nImplicitly accessed through '%(document_kind)s' (%(document_model)s).",
  4093. previous_message=e.args[0],
  4094. document_kind=description,
  4095. document_model=self._name,
  4096. ))
  4097. raise
  4098. # validate inversed fields
  4099. real_recs._validate_fields(inverse_fields)
  4100. if check_company and self._check_company_auto:
  4101. self._check_company()
  4102. return True
  4103. def _write(self, vals):
  4104. """ Low-level implementation of write() """
  4105. return self._write_multi([vals] * len(self))
  4106. def _write_multi(self, vals_list):
  4107. """ Low-level implementation of write() """
  4108. assert len(self) == len(vals_list)
  4109. if not self:
  4110. return
  4111. # determine records that require updating parent_path
  4112. parent_records = self._parent_store_update_prepare(vals_list)
  4113. if self._log_access:
  4114. # set magic fields (already done by write(), but not for computed fields)
  4115. log_vals = {'write_uid': self.env.uid, 'write_date': self.env.cr.now()}
  4116. vals_list = [(log_vals | vals) for vals in vals_list]
  4117. # determine SQL updates, grouped by set of updated fields:
  4118. # {(col1, col2, col3): [(id, val1, val2, val3)]}
  4119. updates = defaultdict(list)
  4120. for record, vals in zip(self, vals_list):
  4121. # sort vals.items() by key, then retrieve its keys and values
  4122. fnames, row = zip(*sorted(vals.items()))
  4123. updates[fnames].append(record._ids + row)
  4124. # perform updates (fnames, rows) in batches
  4125. updates_list = [
  4126. (fnames, sub_rows)
  4127. for fnames, rows in updates.items()
  4128. for sub_rows in split_every(UPDATE_BATCH_SIZE, rows)
  4129. ]
  4130. # update columns by group of updated fields
  4131. for fnames, rows in updates_list:
  4132. columns = []
  4133. assignments = []
  4134. for fname in fnames:
  4135. field = self._fields[fname]
  4136. assert field.store and field.column_type
  4137. column = SQL.identifier(fname)
  4138. # the type cast is necessary for some values, like NULLs
  4139. expr = SQL('"__tmp".%s::%s', column, SQL(field.column_type[1]))
  4140. if field.translate is True:
  4141. # this is the SQL equivalent of:
  4142. # None if expr is None else (
  4143. # (column or {'en_US': next(iter(expr.values()))}) | expr
  4144. # )
  4145. expr = SQL(
  4146. """CASE WHEN %(expr)s IS NULL THEN NULL ELSE
  4147. COALESCE(%(table)s.%(column)s, jsonb_build_object(
  4148. 'en_US', jsonb_path_query_first(%(expr)s, '$.*')
  4149. )) || %(expr)s
  4150. END""",
  4151. table=SQL.identifier(self._table),
  4152. column=column,
  4153. expr=expr,
  4154. )
  4155. if field.company_dependent:
  4156. fallbacks = self.env['ir.default']._get_field_column_fallbacks(self._name, fname)
  4157. expr = SQL(
  4158. """(SELECT jsonb_object_agg(d.key, d.value)
  4159. FROM jsonb_each(COALESCE(%(table)s.%(column)s, '{}'::jsonb) || %(expr)s) d
  4160. JOIN jsonb_each(%(fallbacks)s) f
  4161. ON d.key = f.key AND d.value != f.value)""",
  4162. table=SQL.identifier(self._table),
  4163. column=column,
  4164. expr=expr,
  4165. fallbacks=fallbacks
  4166. )
  4167. columns.append(column)
  4168. assignments.append(SQL("%s = %s", column, expr))
  4169. self.env.execute_query(SQL(
  4170. """ UPDATE %(table)s
  4171. SET %(assignments)s
  4172. FROM (VALUES %(values)s) AS "__tmp"("id", %(columns)s)
  4173. WHERE %(table)s."id" = "__tmp"."id"
  4174. """,
  4175. table=SQL.identifier(self._table),
  4176. assignments=SQL(", ").join(assignments),
  4177. values=SQL(", ").join(rows),
  4178. columns=SQL(", ").join(columns),
  4179. ))
  4180. # update parent_path
  4181. if parent_records:
  4182. parent_records._parent_store_update()
  4183. @api.model_create_multi
  4184. def create(self, vals_list: list[ValuesType]) -> Self:
  4185. """ create(vals_list) -> records
  4186. Creates new records for the model.
  4187. The new records are initialized using the values from the list of dicts
  4188. ``vals_list``, and if necessary those from :meth:`~.default_get`.
  4189. :param Union[list[dict], dict] vals_list:
  4190. values for the model's fields, as a list of dictionaries::
  4191. [{'field_name': field_value, ...}, ...]
  4192. For backward compatibility, ``vals_list`` may be a dictionary.
  4193. It is treated as a singleton list ``[vals]``, and a single record
  4194. is returned.
  4195. see :meth:`~.write` for details
  4196. :return: the created records
  4197. :raise AccessError: if the current user is not allowed to create records of the specified model
  4198. :raise ValidationError: if user tries to enter invalid value for a selection field
  4199. :raise ValueError: if a field name specified in the create values does not exist.
  4200. :raise UserError: if a loop would be created in a hierarchy of objects a result of the operation
  4201. (such as setting an object as its own parent)
  4202. """
  4203. if not vals_list:
  4204. return self.browse()
  4205. self = self.browse()
  4206. self.check_access('create')
  4207. new_vals_list = self._prepare_create_values(vals_list)
  4208. # classify fields for each record
  4209. data_list = []
  4210. determine_inverses = defaultdict(set) # {inverse: fields}
  4211. for vals in new_vals_list:
  4212. precomputed = vals.pop('__precomputed__', ())
  4213. # distribute fields into sets for various purposes
  4214. data = {}
  4215. data['stored'] = stored = {}
  4216. data['inversed'] = inversed = {}
  4217. data['inherited'] = inherited = defaultdict(dict)
  4218. data['protected'] = protected = set()
  4219. for key, val in vals.items():
  4220. field = self._fields.get(key)
  4221. if not field:
  4222. raise ValueError("Invalid field %r on model %r" % (key, self._name))
  4223. if field.store:
  4224. stored[key] = val
  4225. if field.inherited:
  4226. inherited[field.related_field.model_name][key] = val
  4227. elif field.inverse and field not in precomputed:
  4228. inversed[key] = val
  4229. determine_inverses[field.inverse].add(field)
  4230. # protect editable computed fields and precomputed fields
  4231. # against (re)computation
  4232. if field.compute and (not field.readonly or field.precompute):
  4233. protected.update(self.pool.field_computed.get(field, [field]))
  4234. data_list.append(data)
  4235. # create or update parent records
  4236. for model_name, parent_name in self._inherits.items():
  4237. parent_data_list = []
  4238. for data in data_list:
  4239. if not data['stored'].get(parent_name):
  4240. parent_data_list.append(data)
  4241. elif data['inherited'][model_name]:
  4242. parent = self.env[model_name].browse(data['stored'][parent_name])
  4243. parent.write(data['inherited'][model_name])
  4244. if parent_data_list:
  4245. parents = self.env[model_name].create([
  4246. data['inherited'][model_name]
  4247. for data in parent_data_list
  4248. ])
  4249. for parent, data in zip(parents, parent_data_list):
  4250. data['stored'][parent_name] = parent.id
  4251. # create records with stored fields
  4252. records = self._create(data_list)
  4253. # protect fields being written against recomputation
  4254. protected = [(data['protected'], data['record']) for data in data_list]
  4255. with self.env.protecting(protected):
  4256. # call inverse method for each group of fields
  4257. for fields in determine_inverses.values():
  4258. # determine which records to inverse for those fields
  4259. inv_names = {field.name for field in fields}
  4260. rec_vals = [
  4261. (data['record'], {
  4262. name: data['inversed'][name]
  4263. for name in inv_names
  4264. if name in data['inversed'] and name not in data['stored']
  4265. })
  4266. for data in data_list
  4267. if not inv_names.isdisjoint(data['inversed'])
  4268. ]
  4269. # If a field is not stored, its inverse method will probably
  4270. # write on its dependencies, which will invalidate the field on
  4271. # all records. We therefore inverse the field record by record.
  4272. if all(field.store or field.company_dependent for field in fields):
  4273. batches = [rec_vals]
  4274. else:
  4275. batches = [[rec_data] for rec_data in rec_vals]
  4276. for batch in batches:
  4277. for record, vals in batch:
  4278. record._update_cache(vals)
  4279. batch_recs = self.concat(*(record for record, vals in batch))
  4280. next(iter(fields)).determine_inverse(batch_recs)
  4281. # check Python constraints for non-stored inversed fields
  4282. for data in data_list:
  4283. data['record']._validate_fields(data['inversed'], data['stored'])
  4284. if self._check_company_auto:
  4285. records._check_company()
  4286. import_module = self.env.context.get('_import_current_module')
  4287. if not import_module: # not an import -> bail
  4288. return records
  4289. # It is to support setting xids directly in create by
  4290. # providing an "id" key (otherwise stripped by create) during an import
  4291. # (which should strip 'id' from the input data anyway)
  4292. noupdate = self.env.context.get('noupdate', False)
  4293. xids = (v.get('id') for v in vals_list)
  4294. self.env['ir.model.data']._update_xmlids([
  4295. {
  4296. 'xml_id': xid if '.' in xid else ('%s.%s' % (import_module, xid)),
  4297. 'record': rec,
  4298. # note: this is not used when updating o2ms above...
  4299. 'noupdate': noupdate,
  4300. }
  4301. for rec, xid in zip(records, xids)
  4302. if xid and isinstance(xid, str)
  4303. ])
  4304. return records
  4305. def _prepare_create_values(self, vals_list):
  4306. """ Clean up and complete the given create values, and return a list of
  4307. new vals containing:
  4308. * default values,
  4309. * discarded forbidden values (magic fields),
  4310. * precomputed fields.
  4311. :param list vals_list: List of create values
  4312. :returns: new list of completed create values
  4313. :rtype: dict
  4314. """
  4315. bad_names = ['id', 'parent_path']
  4316. if self._log_access:
  4317. # the superuser can set log_access fields while loading registry
  4318. if not(self.env.uid == SUPERUSER_ID and not self.pool.ready):
  4319. bad_names.extend(LOG_ACCESS_COLUMNS)
  4320. # also discard precomputed readonly fields (to force their computation)
  4321. bad_names.extend(
  4322. fname
  4323. for fname, field in self._fields.items()
  4324. if field.precompute and field.readonly
  4325. )
  4326. result_vals_list = []
  4327. for vals in vals_list:
  4328. # add default values
  4329. vals = self._add_missing_default_values(vals)
  4330. # add magic fields
  4331. for fname in bad_names:
  4332. vals.pop(fname, None)
  4333. if self._log_access:
  4334. vals.setdefault('create_uid', self.env.uid)
  4335. vals.setdefault('create_date', self.env.cr.now())
  4336. vals.setdefault('write_uid', self.env.uid)
  4337. vals.setdefault('write_date', self.env.cr.now())
  4338. result_vals_list.append(vals)
  4339. # add precomputed fields
  4340. self._add_precomputed_values(result_vals_list)
  4341. return result_vals_list
  4342. def _add_precomputed_values(self, vals_list):
  4343. """ Add missing precomputed fields to ``vals_list`` values.
  4344. Only applies for precompute=True fields.
  4345. :param dict vals_list: list(dict) of create values
  4346. """
  4347. precomputable = {
  4348. fname: field
  4349. for fname, field in self._fields.items()
  4350. if field.precompute
  4351. }
  4352. if not precomputable:
  4353. return
  4354. # determine which vals must be completed
  4355. vals_list_todo = [
  4356. vals
  4357. for vals in vals_list
  4358. if any(fname not in vals for fname in precomputable)
  4359. ]
  4360. if not vals_list_todo:
  4361. return
  4362. # create new records for the vals that must be completed
  4363. records = self.browse().concat(*(self.new(vals) for vals in vals_list_todo))
  4364. for record, vals in zip(records, vals_list_todo):
  4365. vals['__precomputed__'] = precomputed = set()
  4366. for fname, field in precomputable.items():
  4367. if fname not in vals:
  4368. # computed stored fields with a column
  4369. # have to be computed before create
  4370. # s.t. required and constraints can be applied on those fields.
  4371. vals[fname] = field.convert_to_write(record[fname], self)
  4372. precomputed.add(field)
  4373. @api.model
  4374. def _create(self, data_list):
  4375. """ Create records from the stored field values in ``data_list``. """
  4376. assert data_list
  4377. cr = self.env.cr
  4378. # insert rows in batches of maximum INSERT_BATCH_SIZE
  4379. ids = [] # ids of created records
  4380. other_fields = OrderedSet() # non-column fields
  4381. for data_sublist in split_every(INSERT_BATCH_SIZE, data_list):
  4382. stored_list = [data['stored'] for data in data_sublist]
  4383. fnames = sorted({name for stored in stored_list for name in stored})
  4384. columns = []
  4385. rows = [[] for _ in stored_list]
  4386. for fname in fnames:
  4387. field = self._fields[fname]
  4388. if field.column_type:
  4389. columns.append(fname)
  4390. for stored, row in zip(stored_list, rows):
  4391. if fname in stored:
  4392. row.append(field.convert_to_column_insert(stored[fname], self, stored))
  4393. else:
  4394. row.append(SQL_DEFAULT)
  4395. else:
  4396. other_fields.add(field)
  4397. if field.type == 'properties':
  4398. # force calling fields.create for properties field because
  4399. # we might want to update the parent definition
  4400. other_fields.add(field)
  4401. if not columns:
  4402. # manage the case where we create empty records
  4403. columns = ['id']
  4404. for row in rows:
  4405. row.append(SQL_DEFAULT)
  4406. cr.execute(SQL(
  4407. 'INSERT INTO %s (%s) VALUES %s RETURNING "id"',
  4408. SQL.identifier(self._table),
  4409. SQL(', ').join(map(SQL.identifier, columns)),
  4410. SQL(', ').join(tuple(row) for row in rows),
  4411. ))
  4412. ids.extend(id_ for id_, in cr.fetchall())
  4413. # put the new records in cache, and update inverse fields, for many2one
  4414. # (using bin_size=False to put binary values in the right place)
  4415. #
  4416. # cachetoclear is an optimization to avoid modified()'s cost until other_fields are processed
  4417. cachetoclear = []
  4418. records = self.browse(ids)
  4419. inverses_update = defaultdict(list) # {(field, value): ids}
  4420. common_set_vals = set(LOG_ACCESS_COLUMNS + ['id', 'parent_path'])
  4421. for data, record in zip(data_list, records.with_context(bin_size=False)):
  4422. data['record'] = record
  4423. # DLE P104: test_inherit.py, test_50_search_one2many
  4424. vals = dict({k: v for d in data['inherited'].values() for k, v in d.items()}, **data['stored'])
  4425. set_vals = common_set_vals.union(vals)
  4426. for field in self._fields.values():
  4427. if field.type in ('one2many', 'many2many'):
  4428. self.env.cache.set(record, field, ())
  4429. elif field.related and not field.column_type:
  4430. self.env.cache.set(record, field, field.convert_to_cache(None, record))
  4431. # DLE P123: `test_adv_activity`, `test_message_assignation_inbox`, `test_message_log`, `test_create_mail_simple`, ...
  4432. # Set `mail.message.parent_id` to False in cache so it doesn't do the useless SELECT when computing the modified of `child_ids`
  4433. # in other words, if `parent_id` is not set, no other message `child_ids` are impacted.
  4434. # + avoid the fetch of fields which are False. e.g. if a boolean field is not passed in vals and as no default set in the field attributes,
  4435. # then we know it can be set to False in the cache in the case of a create.
  4436. elif field.store and field.name not in set_vals and not field.compute:
  4437. self.env.cache.set(record, field, field.convert_to_cache(None, record))
  4438. for fname, value in vals.items():
  4439. field = self._fields[fname]
  4440. if field.type in ('one2many', 'many2many'):
  4441. cachetoclear.append((record, field))
  4442. else:
  4443. cache_value = field.convert_to_cache(value, record)
  4444. self.env.cache.set(record, field, cache_value)
  4445. if field.type in ('many2one', 'many2one_reference') and self.pool.field_inverses[field]:
  4446. inverses_update[(field, cache_value)].append(record.id)
  4447. for (field, value), record_ids in inverses_update.items():
  4448. field._update_inverses(self.browse(record_ids), value)
  4449. # update parent_path
  4450. records._parent_store_create()
  4451. # protect fields being written against recomputation
  4452. protected = [(data['protected'], data['record']) for data in data_list]
  4453. with self.env.protecting(protected):
  4454. # mark computed fields as todo
  4455. records.modified(self._fields, create=True)
  4456. if other_fields:
  4457. # discard default values from context for other fields
  4458. others = records.with_context(clean_context(self._context))
  4459. for field in sorted(other_fields, key=attrgetter('_sequence')):
  4460. field.create([
  4461. (other, data['stored'][field.name])
  4462. for other, data in zip(others, data_list)
  4463. if field.name in data['stored']
  4464. ])
  4465. # mark fields to recompute
  4466. records.modified([field.name for field in other_fields], create=True)
  4467. # if value in cache has not been updated by other_fields, remove it
  4468. for record, field in cachetoclear:
  4469. if self.env.cache.contains(record, field) and not self.env.cache.get(record, field):
  4470. self.env.cache.remove(record, field)
  4471. # check Python constraints for stored fields
  4472. records._validate_fields(name for data in data_list for name in data['stored'])
  4473. records.check_access('create')
  4474. return records
  4475. def _compute_field_value(self, field):
  4476. fields.determine(field.compute, self)
  4477. if field.store and any(self._ids):
  4478. # check constraints of the fields that have been computed
  4479. fnames = [f.name for f in self.pool.field_computed[field]]
  4480. self.filtered('id')._validate_fields(fnames)
  4481. def _parent_store_create(self):
  4482. """ Set the parent_path field on ``self`` after its creation. """
  4483. if not self._parent_store:
  4484. return
  4485. self._cr.execute(SQL(
  4486. """ UPDATE %(table)s node
  4487. SET parent_path=concat((
  4488. SELECT parent.parent_path
  4489. FROM %(table)s parent
  4490. WHERE parent.id=node.%(parent)s
  4491. ), node.id, '/')
  4492. WHERE node.id IN %(ids)s
  4493. RETURNING node.id, node.parent_path """,
  4494. table=SQL.identifier(self._table),
  4495. parent=SQL.identifier(self._parent_name),
  4496. ids=tuple(self.ids),
  4497. ))
  4498. # update the cache of updated nodes, and determine what to recompute
  4499. updated = dict(self._cr.fetchall())
  4500. records = self.browse(updated)
  4501. self.env.cache.update(records, self._fields['parent_path'], updated.values())
  4502. def _parent_store_update_prepare(self, vals_list):
  4503. """ Return the records in ``self`` that must update their parent_path
  4504. field. This must be called before updating the parent field.
  4505. """
  4506. if not self._parent_store:
  4507. return self.browse()
  4508. # associate each new parent_id to its corresponding record ids
  4509. parent_to_ids = defaultdict(list)
  4510. for id_, vals in zip(self._ids, vals_list):
  4511. if self._parent_name in vals:
  4512. parent_to_ids[vals[self._parent_name]].append(id_)
  4513. if not parent_to_ids:
  4514. return self.browse()
  4515. self.flush_recordset([self._parent_name])
  4516. # return the records for which the parent field will change
  4517. sql_parent = SQL.identifier(self._parent_name)
  4518. conditions = []
  4519. for parent_id, ids in parent_to_ids.items():
  4520. if parent_id:
  4521. condition = SQL('(%s != %s OR %s IS NULL)', sql_parent, parent_id, sql_parent)
  4522. else:
  4523. condition = SQL('%s IS NOT NULL', sql_parent)
  4524. conditions.append(SQL('("id" IN %s AND %s)', tuple(ids), condition))
  4525. rows = self.env.execute_query(SQL(
  4526. "SELECT id FROM %s WHERE %s ORDER BY id",
  4527. SQL.identifier(self._table),
  4528. SQL(" OR ").join(conditions),
  4529. ))
  4530. return self.browse(row[0] for row in rows)
  4531. def _parent_store_update(self):
  4532. """ Update the parent_path field of ``self``. """
  4533. for parent, records in self.grouped(self._parent_name).items():
  4534. # determine new prefix of parent_path of records
  4535. prefix = parent.parent_path or ""
  4536. # check for recursion
  4537. if prefix:
  4538. parent_ids = {int(label) for label in prefix.split('/')[:-1]}
  4539. if not parent_ids.isdisjoint(records._ids):
  4540. raise UserError(_("Recursion Detected."))
  4541. # update parent_path of all records and their descendants
  4542. rows = self.env.execute_query(SQL(
  4543. """ UPDATE %(table)s child
  4544. SET parent_path = concat(%(prefix)s, substr(child.parent_path,
  4545. length(node.parent_path) - length(node.id || '/') + 1))
  4546. FROM %(table)s node
  4547. WHERE node.id IN %(ids)s
  4548. AND child.parent_path LIKE concat(node.parent_path, %(wildcard)s)
  4549. RETURNING child.id, child.parent_path """,
  4550. table=SQL.identifier(self._table),
  4551. prefix=prefix,
  4552. ids=tuple(records.ids),
  4553. wildcard='%',
  4554. ))
  4555. # update the cache of updated nodes, and determine what to recompute
  4556. updated = dict(rows)
  4557. records = self.browse(updated)
  4558. self.env.cache.update(records, self._fields['parent_path'], updated.values())
  4559. records.modified(['parent_path'])
  4560. def _clean_properties(self) -> None:
  4561. """ Remove all properties of ``self`` that are no longer in the related definition """
  4562. for fname, field in self._fields.items():
  4563. if field.type != 'properties':
  4564. continue
  4565. for record in self:
  4566. old_value = record[fname]
  4567. if not old_value:
  4568. continue
  4569. definitions = field._get_properties_definition(record)
  4570. all_names = {definition['name'] for definition in definitions}
  4571. new_values = {name: value for name, value in old_value.items() if name in all_names}
  4572. if len(new_values) != len(old_value):
  4573. record[fname] = new_values
  4574. def _load_records_write(self, values):
  4575. self.ensure_one()
  4576. to_write = {} # Deferred the write to avoid using the old definition if it changed
  4577. for fname in list(values):
  4578. if fname not in self or self._fields[fname].type != 'properties':
  4579. continue
  4580. field_converter = self._fields[fname].convert_to_cache
  4581. to_write[fname] = dict(self[fname], **field_converter(values.pop(fname), self))
  4582. self.write(values)
  4583. if to_write:
  4584. self.write(to_write)
  4585. # Because we don't know which properties was linked to which definition,
  4586. # we can know clean properties (note that it is not mandatory, we can wait
  4587. # that client change the record in a Form view)
  4588. self._clean_properties()
  4589. def _load_records_create(self, vals_list):
  4590. records = self.create(vals_list)
  4591. if any(field.type == 'properties' for field in self._fields.values()):
  4592. records._clean_properties()
  4593. return records
  4594. def _load_records(self, data_list, update=False, ignore_duplicates=False):
  4595. """ Create or update records of this model, and assign XMLIDs.
  4596. :param data_list: list of dicts with keys `xml_id` (XMLID to
  4597. assign), `noupdate` (flag on XMLID), `values` (field values)
  4598. :param update: should be ``True`` when upgrading a module
  4599. :param ignore_duplicates: if true, inputs that match records already in the DB will be ignored
  4600. :return: the records corresponding to ``data_list``
  4601. """
  4602. original_self = self.browse()
  4603. # records created during installation should not display messages
  4604. self = self.with_context(install_mode=True)
  4605. imd = self.env['ir.model.data'].sudo()
  4606. # The algorithm below partitions 'data_list' into three sets: the ones
  4607. # to create, the ones to update, and the others. For each set, we assign
  4608. # data['record'] for each data. All those records are then retrieved for
  4609. # the result.
  4610. # determine existing xml_ids
  4611. xml_ids = [data['xml_id'] for data in data_list if data.get('xml_id')]
  4612. existing = {
  4613. ("%s.%s" % row[1:3]): row
  4614. for row in imd._lookup_xmlids(xml_ids, self)
  4615. }
  4616. # determine which records to create and update
  4617. to_create = [] # list of data
  4618. to_update = [] # list of data
  4619. imd_data_list = [] # list of data for _update_xmlids()
  4620. for data in data_list:
  4621. xml_id = data.get('xml_id')
  4622. if not xml_id:
  4623. vals = data['values']
  4624. if vals.get('id'):
  4625. data['record'] = self.browse(vals['id'])
  4626. to_update.append(data)
  4627. elif not update:
  4628. to_create.append(data)
  4629. continue
  4630. row = existing.get(xml_id)
  4631. if not row:
  4632. to_create.append(data)
  4633. continue
  4634. d_id, d_module, d_name, d_model, d_res_id, d_noupdate, r_id = row
  4635. if self._name != d_model:
  4636. raise ValidationError(
  4637. f"For external id {xml_id} "
  4638. f"when trying to create/update a record of model {self._name} "
  4639. f"found record of different model {d_model} ({d_id})"
  4640. )
  4641. record = self.browse(d_res_id)
  4642. if r_id:
  4643. data['record'] = record
  4644. imd_data_list.append(data)
  4645. if not (update and d_noupdate):
  4646. to_update.append(data)
  4647. else:
  4648. imd.browse(d_id).unlink()
  4649. to_create.append(data)
  4650. # update existing records
  4651. if ignore_duplicates:
  4652. data_list = [data for data in data_list if data not in to_update]
  4653. else:
  4654. for data in to_update:
  4655. data['record']._load_records_write(data['values'])
  4656. # check for records to create with an XMLID from another module
  4657. module = self.env.context.get('install_module')
  4658. if module:
  4659. prefix = module + "."
  4660. for data in to_create:
  4661. if data.get('xml_id') and not data['xml_id'].startswith(prefix) and not self.env.context.get('foreign_record_to_create'):
  4662. _logger.warning("Creating record %s in module %s.", data['xml_id'], module)
  4663. if self.env.context.get('import_file'):
  4664. existing_modules = self.env['ir.module.module'].sudo().search([]).mapped('name')
  4665. for data in to_create:
  4666. xml_id = data.get('xml_id')
  4667. if xml_id and not data.get('noupdate'):
  4668. module_name, sep, record_id = xml_id.partition('.')
  4669. if sep and module_name in existing_modules:
  4670. raise UserError(
  4671. _("The record %(xml_id)s has the module prefix %(module_name)s. This is the part before the '.' in the external id. Because the prefix refers to an existing module, the record would be deleted when the module is upgraded. Use either no prefix and no dot or a prefix that isn't an existing module. For example, __import__, resulting in the external id __import__.%(record_id)s.",
  4672. xml_id=xml_id, module_name=module_name, record_id=record_id))
  4673. # create records
  4674. if to_create:
  4675. records = self._load_records_create([data['values'] for data in to_create])
  4676. for data, record in zip(to_create, records):
  4677. data['record'] = record
  4678. if data.get('xml_id'):
  4679. # add XML ids for parent records that have just been created
  4680. for parent_model, parent_field in self._inherits.items():
  4681. if not data['values'].get(parent_field):
  4682. imd_data_list.append({
  4683. 'xml_id': f"{data['xml_id']}_{parent_model.replace('.', '_')}",
  4684. 'record': record[parent_field],
  4685. 'noupdate': data.get('noupdate', False),
  4686. })
  4687. imd_data_list.append(data)
  4688. # create or update XMLIDs
  4689. imd._update_xmlids(imd_data_list, update)
  4690. return original_self.concat(*(data['record'] for data in data_list))
  4691. @api.model
  4692. def _where_calc(self, domain, active_test=True):
  4693. """Computes the WHERE clause needed to implement an OpenERP domain.
  4694. :param list domain: the domain to compute
  4695. :param bool active_test: whether the default filtering of records with
  4696. ``active`` field set to ``False`` should be applied.
  4697. :return: the query expressing the given domain as provided in domain
  4698. :rtype: Query
  4699. """
  4700. # if the object has an active field ('active', 'x_active'), filter out all
  4701. # inactive records unless they were explicitly asked for
  4702. if self._active_name and active_test and self._context.get('active_test', True):
  4703. # the item[0] trick below works for domain items and '&'/'|'/'!'
  4704. # operators too
  4705. if not any(item[0] == self._active_name for item in domain):
  4706. domain = [(self._active_name, '=', 1)] + domain
  4707. if domain:
  4708. return expression.expression(domain, self).query
  4709. else:
  4710. return Query(self.env, self._table, self._table_sql)
  4711. def _check_qorder(self, word):
  4712. if not regex_order.match(word):
  4713. raise UserError(_(
  4714. "Invalid \"order\" specified (%s)."
  4715. " A valid \"order\" specification is a comma-separated list of valid field names"
  4716. " (optionally followed by asc/desc for the direction)",
  4717. word,
  4718. ))
  4719. return True
  4720. @api.model
  4721. def _apply_ir_rules(self, query, mode='read'):
  4722. """Add what's missing in ``query`` to implement all appropriate ir.rules
  4723. (using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
  4724. :param query: the current query object
  4725. """
  4726. if self.env.su:
  4727. return
  4728. # apply main rules on the object
  4729. Rule = self.env['ir.rule']
  4730. domain = Rule._compute_domain(self._name, mode)
  4731. if domain:
  4732. expression.expression(domain, self.sudo(), self._table, query)
  4733. def _order_to_sql(self, order: str, query: Query, alias: (str | None) = None,
  4734. reverse: bool = False) -> SQL:
  4735. """ Return an :class:`SQL` object that represents the given ORDER BY
  4736. clause, without the ORDER BY keyword. The method also checks whether
  4737. the fields in the order are accessible for reading.
  4738. """
  4739. order = order or self._order
  4740. if not order:
  4741. return SQL()
  4742. self._check_qorder(order)
  4743. alias = alias or self._table
  4744. terms = []
  4745. for order_part in order.split(','):
  4746. order_match = regex_order.match(order_part)
  4747. field_name = order_match['field']
  4748. property_name = order_match['property']
  4749. if property_name:
  4750. field_name = f"{field_name}.{property_name}"
  4751. direction = (order_match['direction'] or '').upper()
  4752. nulls = (order_match['nulls'] or '').upper()
  4753. if reverse:
  4754. direction = 'ASC' if direction == 'DESC' else 'DESC'
  4755. if nulls:
  4756. nulls = 'NULLS LAST' if nulls == 'NULLS FIRST' else 'NULLS FIRST'
  4757. sql_direction = SQL(direction) if direction in ('ASC', 'DESC') else SQL()
  4758. sql_nulls = SQL(nulls) if nulls in ('NULLS FIRST', 'NULLS LAST') else SQL()
  4759. term = self._order_field_to_sql(alias, field_name, sql_direction, sql_nulls, query)
  4760. if term:
  4761. terms.append(term)
  4762. return SQL(", ").join(terms)
  4763. def _order_field_to_sql(self, alias: str, field_name: str, direction: SQL,
  4764. nulls: SQL, query: Query) -> SQL:
  4765. """ Return an :class:`SQL` object that represents the ordering by the
  4766. given field. The method also checks whether the field is accessible for
  4767. reading.
  4768. :param direction: one of ``SQL("ASC")``, ``SQL("DESC")``, ``SQL()``
  4769. :param nulls: one of ``SQL("NULLS FIRST")``, ``SQL("NULLS LAST")``, ``SQL()``
  4770. """
  4771. # field_name can be a path (for properties by example)
  4772. fname = field_name.split('.', 1)[0] if '.' in field_name else field_name
  4773. field = self._fields.get(fname)
  4774. if not field:
  4775. raise ValueError(f"Invalid field {fname!r} on model {self._name!r}")
  4776. if field.type == 'many2one':
  4777. seen = self.env.context.get('__m2o_order_seen', ())
  4778. if field in seen:
  4779. return SQL()
  4780. self = self.with_context(__m2o_order_seen=frozenset((field, *seen)))
  4781. # figure out the applicable order_by for the m2o
  4782. # special case: ordering by "x_id.id" doesn't recurse on x_id's comodel
  4783. comodel = self.env[field.comodel_name]
  4784. if field_name.endswith('.id'):
  4785. coorder = 'id'
  4786. sql_field = self._field_to_sql(alias, fname, query)
  4787. else:
  4788. coorder = comodel._order
  4789. sql_field = self._field_to_sql(alias, field_name, query)
  4790. if coorder == 'id':
  4791. if query.groupby:
  4792. query.groupby = SQL('%s, %s', query.groupby, sql_field)
  4793. return SQL("%s %s %s", sql_field, direction, nulls)
  4794. # instead of ordering by the field's raw value, use the comodel's
  4795. # order on many2one values
  4796. terms = []
  4797. if nulls.code == 'NULLS FIRST':
  4798. terms.append(SQL("%s IS NOT NULL", self._field_to_sql(alias, field_name, query)))
  4799. elif nulls.code == 'NULLS LAST':
  4800. terms.append(SQL("%s IS NULL", self._field_to_sql(alias, field_name, query)))
  4801. # LEFT JOIN the comodel table, in order to include NULL values, too
  4802. coalias = query.make_alias(alias, field_name)
  4803. query.add_join('LEFT JOIN', coalias, comodel._table, SQL(
  4804. "%s = %s",
  4805. sql_field,
  4806. SQL.identifier(coalias, 'id'),
  4807. ))
  4808. # delegate the order to the comodel
  4809. reverse = direction.code == 'DESC'
  4810. term = comodel._order_to_sql(coorder, query, alias=coalias, reverse=reverse)
  4811. if term:
  4812. terms.append(term)
  4813. return SQL(", ").join(terms)
  4814. sql_field = self._field_to_sql(alias, field_name, query)
  4815. if field.type == 'boolean':
  4816. sql_field = SQL("COALESCE(%s, FALSE)", sql_field)
  4817. if query.groupby:
  4818. query.groupby = SQL('%s, %s', query.groupby, sql_field)
  4819. return SQL("%s %s %s", sql_field, direction, nulls)
  4820. @api.model
  4821. def _flush_search(self, domain, fields=None, order=None, seen=None):
  4822. """ Flush all the fields appearing in `domain`, `fields` and `order`.
  4823. Note that ``order=None`` actually means no order, so if you expect some
  4824. fallback order, you have to provide it yourself.
  4825. """
  4826. warnings.warn("Since 18.0, _flush_search are deprecated")
  4827. if seen is None:
  4828. seen = set()
  4829. elif self._name in seen:
  4830. return
  4831. seen.add(self._name)
  4832. to_flush = defaultdict(OrderedSet) # {model_name: field_names}
  4833. if fields:
  4834. to_flush[self._name].update(fields)
  4835. def collect_from_domain(model, domain):
  4836. for arg in domain:
  4837. if isinstance(arg, str):
  4838. continue
  4839. if not isinstance(arg[0], str):
  4840. continue
  4841. comodel = collect_from_path(model, arg[0])
  4842. if arg[1] in ('child_of', 'parent_of') and comodel._parent_store:
  4843. # hierarchy operators need the parent field
  4844. collect_from_path(comodel, comodel._parent_name)
  4845. if arg[1] in ('any', 'not any'):
  4846. collect_from_domain(comodel, arg[2])
  4847. def collect_from_path(model, path):
  4848. # path is a dot-separated sequence of field names
  4849. for fname in path.split('.'):
  4850. field = model._fields.get(fname)
  4851. if not field:
  4852. break
  4853. to_flush[model._name].add(fname)
  4854. if field.type == 'one2many' and field.inverse_name:
  4855. to_flush[field.comodel_name].add(field.inverse_name)
  4856. field_domain = field.get_domain_list(model)
  4857. if field_domain:
  4858. collect_from_domain(self.env[field.comodel_name], field_domain)
  4859. # DLE P111: `test_message_process_email_partner_find`
  4860. # Search on res.users with email_normalized in domain
  4861. # must trigger the recompute and flush of res.partner.email_normalized
  4862. if field.related:
  4863. # DLE P129: `test_transit_multi_companies`
  4864. # `self.env['stock.picking'].search([('product_id', '=', product.id)])`
  4865. # Should flush `stock.move.picking_ids` as `product_id` on `stock.picking` is defined as:
  4866. # `product_id = fields.Many2one('product.product', 'Product', related='move_lines.product_id', readonly=False)`
  4867. collect_from_path(model, field.related)
  4868. if field.relational:
  4869. model = self.env[field.comodel_name]
  4870. # return the model found by traversing all fields (used in collect_from_domain)
  4871. return model
  4872. # flush the order fields
  4873. if order:
  4874. for order_part in order.split(','):
  4875. order_field = order_part.split()[0]
  4876. field = self._fields.get(order_field)
  4877. if field is not None:
  4878. to_flush[self._name].add(order_field)
  4879. if field.relational:
  4880. comodel = self.env[field.comodel_name]
  4881. comodel._flush_search([], order=comodel._order, seen=seen)
  4882. if self._active_name and self.env.context.get('active_test', True):
  4883. to_flush[self._name].add(self._active_name)
  4884. collect_from_domain(self, domain)
  4885. # Check access of fields with groups
  4886. for model_name, field_names in to_flush.items():
  4887. self.env[model_name].check_field_access_rights('read', field_names)
  4888. # also take into account the fields in the record rules
  4889. if ir_rule_domain := self.env['ir.rule']._compute_domain(self._name, 'read'):
  4890. collect_from_domain(self, ir_rule_domain)
  4891. # flush model dependencies (recursively)
  4892. if self._depends:
  4893. models = [self]
  4894. while models:
  4895. model = models.pop()
  4896. for model_name, field_names in model._depends.items():
  4897. to_flush[model_name].update(field_names)
  4898. models.append(self.env[model_name])
  4899. for model_name, field_names in to_flush.items():
  4900. self.env[model_name].flush_model(field_names)
  4901. @api.model
  4902. def _search(self, domain, offset=0, limit=None, order=None) -> Query:
  4903. """
  4904. Private implementation of search() method.
  4905. No default order is applied when the method is invoked without parameter ``order``.
  4906. :return: a :class:`Query` object that represents the matching records
  4907. This method may be overridden to modify the domain being searched, or to
  4908. do some post-filtering of the resulting query object. Be careful with
  4909. the latter option, though, as it might hurt performance. Indeed, by
  4910. default the returned query object is not actually executed, and it can
  4911. be injected as a value in a domain in order to generate sub-queries.
  4912. """
  4913. self.browse().check_access('read')
  4914. if expression.is_false(self, domain):
  4915. # optimization: no need to query, as no record satisfies the domain
  4916. return self.browse()._as_query()
  4917. query = self._where_calc(domain)
  4918. self._apply_ir_rules(query, 'read')
  4919. if order:
  4920. query.order = self._order_to_sql(order, query)
  4921. query.limit = limit
  4922. query.offset = offset
  4923. return query
  4924. def _as_query(self, ordered=True):
  4925. """ Return a :class:`Query` that corresponds to the recordset ``self``.
  4926. This method is convenient for making a query object with a known result.
  4927. :param ordered: whether the recordset order must be enforced by the query
  4928. """
  4929. query = Query(self.env, self._table, self._table_sql)
  4930. query.set_result_ids(self._ids, ordered)
  4931. return query
  4932. def copy_data(self, default=None):
  4933. """
  4934. Copy given record's data with all its fields values
  4935. :param default: field values to override in the original values of the copied record
  4936. :return: list of dictionaries containing all the field values
  4937. """
  4938. vals_list = []
  4939. default = dict(default or {})
  4940. # avoid recursion through already copied records in case of circular relationship
  4941. if '__copy_data_seen' not in self._context:
  4942. self = self.with_context(__copy_data_seen=defaultdict(set))
  4943. # build a black list of fields that should not be copied
  4944. blacklist = set(MAGIC_COLUMNS + ['parent_path'])
  4945. whitelist = set(name for name, field in self._fields.items() if not field.inherited)
  4946. def blacklist_given_fields(model):
  4947. # blacklist the fields that are given by inheritance
  4948. for parent_model, parent_field in model._inherits.items():
  4949. blacklist.add(parent_field)
  4950. if parent_field in default:
  4951. # all the fields of 'parent_model' are given by the record:
  4952. # default[parent_field], except the ones redefined in self
  4953. blacklist.update(set(self.env[parent_model]._fields) - whitelist)
  4954. else:
  4955. blacklist_given_fields(self.env[parent_model])
  4956. blacklist_given_fields(self)
  4957. fields_to_copy = {name: field
  4958. for name, field in self._fields.items()
  4959. if field.copy and name not in default and name not in blacklist}
  4960. for record in self:
  4961. seen_map = self._context['__copy_data_seen']
  4962. if record.id in seen_map[record._name]:
  4963. vals_list.append(None)
  4964. continue
  4965. seen_map[record._name].add(record.id)
  4966. vals = default.copy()
  4967. for name, field in fields_to_copy.items():
  4968. if field.type == 'one2many':
  4969. # duplicate following the order of the ids because we'll rely on
  4970. # it later for copying translations in copy_translation()!
  4971. lines = record[name].sorted(key='id').copy_data()
  4972. # the lines are duplicated using the wrong (old) parent, but then are
  4973. # reassigned to the correct one thanks to the (Command.CREATE, 0, ...)
  4974. vals[name] = [Command.create(line) for line in lines if line]
  4975. elif field.type == 'many2many':
  4976. vals[name] = [Command.set(record[name].ids)]
  4977. else:
  4978. vals[name] = field.convert_to_write(record[name], record)
  4979. vals_list.append(vals)
  4980. return vals_list
  4981. def copy_translations(self, new, excluded=()):
  4982. """ Recursively copy the translations from original to new record
  4983. :param self: the original record
  4984. :param new: the new record (copy of the original one)
  4985. :param excluded: a container of user-provided field names
  4986. """
  4987. old = self
  4988. # avoid recursion through already copied records in case of circular relationship
  4989. if '__copy_translations_seen' not in old._context:
  4990. old = old.with_context(__copy_translations_seen=defaultdict(set))
  4991. seen_map = old._context['__copy_translations_seen']
  4992. if old.id in seen_map[old._name]:
  4993. return
  4994. seen_map[old._name].add(old.id)
  4995. valid_langs = set(code for code, _ in self.env['res.lang'].get_installed()) | {'en_US'}
  4996. for name, field in old._fields.items():
  4997. if not field.copy:
  4998. continue
  4999. if field.inherited and field.related.split('.')[0] in excluded:
  5000. # inherited fields that come from a user-provided parent record
  5001. # must not copy translations, as the parent record is not a copy
  5002. # of the old parent record
  5003. continue
  5004. if field.type == 'one2many' and field.name not in excluded:
  5005. # we must recursively copy the translations for o2m; here we
  5006. # rely on the order of the ids to match the translations as
  5007. # foreseen in copy_data()
  5008. old_lines = old[name].sorted(key='id')
  5009. new_lines = new[name].sorted(key='id')
  5010. for (old_line, new_line) in zip(old_lines, new_lines):
  5011. # don't pass excluded as it is not about those lines
  5012. old_line.copy_translations(new_line)
  5013. elif field.translate and field.store and name not in excluded and old[name]:
  5014. # for translatable fields we copy their translations
  5015. old_stored_translations = field._get_stored_translations(old)
  5016. if not old_stored_translations:
  5017. continue
  5018. lang = self.env.lang or 'en_US'
  5019. if field.translate is True:
  5020. new.update_field_translations(name, {
  5021. k: v for k, v in old_stored_translations.items() if k in valid_langs and k != lang
  5022. })
  5023. else:
  5024. old_translations = {
  5025. k: old_stored_translations.get(f'_{k}', v)
  5026. for k, v in old_stored_translations.items()
  5027. if k in valid_langs
  5028. }
  5029. # {from_lang_term: {lang: to_lang_term}
  5030. translation_dictionary = field.get_translation_dictionary(
  5031. old_translations.pop(lang, old_translations['en_US']),
  5032. old_translations
  5033. )
  5034. # {lang: {old_term: new_term}}
  5035. translations = defaultdict(dict)
  5036. for from_lang_term, to_lang_terms in translation_dictionary.items():
  5037. for lang, to_lang_term in to_lang_terms.items():
  5038. translations[lang][from_lang_term] = to_lang_term
  5039. new.update_field_translations(name, translations)
  5040. @api.returns('self')
  5041. def copy(self, default: ValuesType | None = None) -> Self:
  5042. """ copy(default=None)
  5043. Duplicate record ``self`` updating it with default values
  5044. :param dict default: dictionary of field values to override in the
  5045. original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
  5046. :returns: new records
  5047. """
  5048. vals_list = self.with_context(active_test=False).copy_data(default)
  5049. new_records = self.create(vals_list)
  5050. for old_record, new_record in zip(self, new_records):
  5051. old_record.copy_translations(new_record, excluded=default or ())
  5052. return new_records
  5053. @api.returns('self')
  5054. def exists(self) -> Self:
  5055. """ exists() -> records
  5056. Returns the subset of records in ``self`` that exist.
  5057. It can be used as a test on records::
  5058. if record.exists():
  5059. ...
  5060. By convention, new records are returned as existing.
  5061. """
  5062. new_ids, ids = partition(lambda i: isinstance(i, NewId), self._ids)
  5063. if not ids:
  5064. return self
  5065. query = Query(self.env, self._table, self._table_sql)
  5066. query.add_where(SQL("%s IN %s", SQL.identifier(self._table, 'id'), tuple(ids)))
  5067. self.env.cr.execute(query.select())
  5068. valid_ids = set([r[0] for r in self._cr.fetchall()] + new_ids)
  5069. return self.browse(i for i in self._ids if i in valid_ids)
  5070. def _has_cycle(self, field_name=None) -> bool:
  5071. """
  5072. Return whether the records in ``self`` are in a loop by following the
  5073. given relationship of the field.
  5074. By default the **parent** field is used as the relationship.
  5075. Note that since the method does not use EXCLUSIVE LOCK for the sake of
  5076. performance, loops may still be created by concurrent transactions.
  5077. :param field_name: optional field name (default: ``self._parent_name``)
  5078. :return: **True** if a loop was found, **False** otherwise.
  5079. """
  5080. if not field_name:
  5081. field_name = self._parent_name
  5082. field = self._fields.get(field_name)
  5083. if not field:
  5084. raise ValueError(f'Invalid field_name: {field_name!r}')
  5085. if not (
  5086. field.type in ('many2many', 'many2one')
  5087. and field.comodel_name == self._name
  5088. and field.store
  5089. ):
  5090. raise ValueError(f'Field must be a many2one or many2many relation on itself: {field_name!r}')
  5091. if not self.ids:
  5092. return False
  5093. # must ignore 'active' flag, ir.rules, etc.
  5094. # direct recursive SQL query with cycle detection for performance
  5095. self.flush_model([field_name])
  5096. if field.type == 'many2many':
  5097. relation = field.relation
  5098. column1 = field.column1
  5099. column2 = field.column2
  5100. else:
  5101. relation = self._table
  5102. column1 = 'id'
  5103. column2 = field_name
  5104. cr = self._cr
  5105. cr.execute(SQL(
  5106. """
  5107. WITH RECURSIVE __reachability AS (
  5108. SELECT %(col1)s AS source, %(col2)s AS destination
  5109. FROM %(rel)s
  5110. WHERE %(col1)s IN %(ids)s AND %(col2)s IS NOT NULL
  5111. UNION
  5112. SELECT r.source, t.%(col2)s
  5113. FROM __reachability r
  5114. JOIN %(rel)s t ON r.destination = t.%(col1)s AND t.%(col2)s IS NOT NULL
  5115. )
  5116. SELECT 1 FROM __reachability
  5117. WHERE source = destination
  5118. LIMIT 1
  5119. """,
  5120. ids=tuple(self.ids),
  5121. rel=SQL.identifier(relation),
  5122. col1=SQL.identifier(column1),
  5123. col2=SQL.identifier(column2),
  5124. ))
  5125. return bool(cr.fetchone())
  5126. def _check_recursion(self, parent=None):
  5127. warnings.warn("Since 18.0, one must use not _has_cycle() instead", DeprecationWarning, 2)
  5128. return not self._has_cycle(parent)
  5129. def _check_m2m_recursion(self, field_name):
  5130. warnings.warn("Since 18.0, one must use not _has_cycle() instead", DeprecationWarning, 2)
  5131. return not self._has_cycle(field_name)
  5132. def _get_external_ids(self):
  5133. """Retrieve the External ID(s) of any database record.
  5134. **Synopsis**: ``_get_external_ids() -> { 'id': ['module.external_id'] }``
  5135. :return: map of ids to the list of their fully qualified External IDs
  5136. in the form ``module.key``, or an empty list when there's no External
  5137. ID for a record, e.g.::
  5138. { 'id': ['module.ext_id', 'module.ext_id_bis'],
  5139. 'id2': [] }
  5140. """
  5141. result = defaultdict(list)
  5142. domain = [('model', '=', self._name), ('res_id', 'in', self.ids)]
  5143. for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id'], order='id'):
  5144. result[data['res_id']].append('%(module)s.%(name)s' % data)
  5145. return {
  5146. record.id: result[record._origin.id]
  5147. for record in self
  5148. }
  5149. def get_external_id(self):
  5150. """Retrieve the External ID of any database record, if there
  5151. is one. This method works as a possible implementation
  5152. for a function field, to be able to add it to any
  5153. model object easily, referencing it as ``Model.get_external_id``.
  5154. When multiple External IDs exist for a record, only one
  5155. of them is returned (randomly).
  5156. :return: map of ids to their fully qualified XML ID,
  5157. defaulting to an empty string when there's none
  5158. (to be usable as a function field),
  5159. e.g.::
  5160. { 'id': 'module.ext_id',
  5161. 'id2': '' }
  5162. """
  5163. results = self._get_external_ids()
  5164. return {key: val[0] if val else ''
  5165. for key, val in results.items()}
  5166. @classmethod
  5167. def is_transient(cls):
  5168. """ Return whether the model is transient.
  5169. See :class:`TransientModel`.
  5170. """
  5171. return cls._transient
  5172. @api.model
  5173. @api.readonly
  5174. def search_read(self, domain=None, fields=None, offset=0, limit=None, order=None, **read_kwargs):
  5175. """ Perform a :meth:`search_fetch` followed by a :meth:`_read_format`.
  5176. :param domain: Search domain, see ``args`` parameter in :meth:`search`.
  5177. Defaults to an empty domain that will match all records.
  5178. :param fields: List of fields to read, see ``fields`` parameter in :meth:`read`.
  5179. Defaults to all fields.
  5180. :param int offset: Number of records to skip, see ``offset`` parameter in :meth:`search`.
  5181. Defaults to 0.
  5182. :param int limit: Maximum number of records to return, see ``limit`` parameter in :meth:`search`.
  5183. Defaults to no limit.
  5184. :param order: Columns to sort result, see ``order`` parameter in :meth:`search`.
  5185. Defaults to no sort.
  5186. :param read_kwargs: All read keywords arguments used to call
  5187. ``read(..., **read_kwargs)`` method e.g. you can use
  5188. ``search_read(..., load='')`` in order to avoid computing display_name
  5189. :return: List of dictionaries containing the asked fields.
  5190. :rtype: list(dict).
  5191. """
  5192. fields = self.check_field_access_rights('read', fields)
  5193. records = self.search_fetch(domain or [], fields, offset=offset, limit=limit, order=order)
  5194. # Method _read_format() ignores 'active_test', but it would forward it
  5195. # to any downstream search call(e.g. for x2m or computed fields), and
  5196. # this is not the desired behavior. The flag was presumably only meant
  5197. # for the main search().
  5198. if 'active_test' in self._context:
  5199. context = dict(self._context)
  5200. del context['active_test']
  5201. records = records.with_context(context)
  5202. return records._read_format(fnames=fields, **read_kwargs)
  5203. def toggle_active(self):
  5204. "Inverses the value of :attr:`active` on the records in ``self``."
  5205. assert self._active_name, f"No 'active' field on model {self._name}"
  5206. active_recs = self.filtered(self._active_name)
  5207. active_recs[self._active_name] = False
  5208. (self - active_recs)[self._active_name] = True
  5209. def action_archive(self):
  5210. """Sets :attr:`active` to ``False`` on a recordset, by calling
  5211. :meth:`toggle_active` on its currently active records.
  5212. """
  5213. assert self._active_name, f"No 'active' field on model {self._name}"
  5214. return self.filtered(lambda record: record[self._active_name]).toggle_active()
  5215. def action_unarchive(self):
  5216. """Sets :attr:`active` to ``True`` on a recordset, by calling
  5217. :meth:`toggle_active` on its currently inactive records.
  5218. """
  5219. assert self._active_name, f"No 'active' field on model {self._name}"
  5220. return self.filtered(lambda record: not record[self._active_name]).toggle_active()
  5221. def _register_hook(self):
  5222. """ stuff to do right after the registry is built """
  5223. def _unregister_hook(self):
  5224. """ Clean up what `~._register_hook` has done. """
  5225. def _get_redirect_suggested_company(self):
  5226. """Return the suggested company to be set on the context
  5227. in case of a URL redirection to the record. To avoid multi
  5228. company issues when clicking on a shared link, this
  5229. could be called to try setting the most suited company on
  5230. the allowed_company_ids in the context. This method can be
  5231. overridden, for example on the hr.leave model, where the
  5232. most suited company is the company of the leave type, as
  5233. specified by the ir.rule.
  5234. """
  5235. if 'company_id' in self:
  5236. return self.company_id
  5237. elif 'company_ids' in self:
  5238. return (self.company_ids & self.env.user.company_ids)[:1]
  5239. return False
  5240. #
  5241. # Instance creation
  5242. #
  5243. # An instance represents an ordered collection of records in a given
  5244. # execution environment. The instance object refers to the environment, and
  5245. # the records themselves are represented by their cache dictionary. The 'id'
  5246. # of each record is found in its corresponding cache dictionary.
  5247. #
  5248. # This design has the following advantages:
  5249. # - cache access is direct and thus fast;
  5250. # - one can consider records without an 'id' (see new records);
  5251. # - the global cache is only an index to "resolve" a record 'id'.
  5252. #
  5253. def __init__(self, env: api.Environment, ids: tuple[IdType, ...], prefetch_ids: Reversible[IdType]):
  5254. """ Create a recordset instance.
  5255. :param env: an environment
  5256. :param ids: a tuple of record ids
  5257. :param prefetch_ids: a reversible iterable of record ids (for prefetching)
  5258. """
  5259. self.env = env
  5260. self._ids = ids
  5261. self._prefetch_ids = prefetch_ids
  5262. def browse(self, ids: int | typing.Iterable[IdType] = ()) -> Self:
  5263. """ browse([ids]) -> records
  5264. Returns a recordset for the ids provided as parameter in the current
  5265. environment.
  5266. .. code-block:: python
  5267. self.browse([7, 18, 12])
  5268. res.partner(7, 18, 12)
  5269. :param ids: id(s)
  5270. :type ids: int or iterable(int) or None
  5271. :return: recordset
  5272. """
  5273. if not ids:
  5274. ids = ()
  5275. elif ids.__class__ is int:
  5276. ids = (ids,)
  5277. else:
  5278. ids = tuple(ids)
  5279. return self.__class__(self.env, ids, ids)
  5280. #
  5281. # Internal properties, for manipulating the instance's implementation
  5282. #
  5283. @property
  5284. def ids(self) -> list[int]:
  5285. """ Return the list of actual record ids corresponding to ``self``. """
  5286. return list(origin_ids(self._ids))
  5287. # backward-compatibility with former browse records
  5288. _cr = property(lambda self: self.env.cr)
  5289. _uid = property(lambda self: self.env.uid)
  5290. _context = property(lambda self: self.env.context)
  5291. #
  5292. # Conversion methods
  5293. #
  5294. def ensure_one(self) -> Self:
  5295. """Verify that the current recordset holds a single record.
  5296. :raise odoo.exceptions.ValueError: ``len(self) != 1``
  5297. """
  5298. try:
  5299. # unpack to ensure there is only one value is faster than len when true and
  5300. # has a significant impact as this check is largely called
  5301. _id, = self._ids
  5302. return self
  5303. except ValueError:
  5304. raise ValueError("Expected singleton: %s" % self)
  5305. def with_env(self, env: api.Environment) -> Self:
  5306. """Return a new version of this recordset attached to the provided environment.
  5307. :param env:
  5308. :type env: :class:`~odoo.api.Environment`
  5309. .. note::
  5310. The returned recordset has the same prefetch object as ``self``.
  5311. """
  5312. return self.__class__(env, self._ids, self._prefetch_ids)
  5313. def sudo(self, flag=True) -> Self:
  5314. """ sudo([flag=True])
  5315. Returns a new version of this recordset with superuser mode enabled or
  5316. disabled, depending on `flag`. The superuser mode does not change the
  5317. current user, and simply bypasses access rights checks.
  5318. .. warning::
  5319. Using ``sudo`` could cause data access to cross the
  5320. boundaries of record rules, possibly mixing records that
  5321. are meant to be isolated (e.g. records from different
  5322. companies in multi-company environments).
  5323. It may lead to un-intuitive results in methods which select one
  5324. record among many - for example getting the default company, or
  5325. selecting a Bill of Materials.
  5326. .. note::
  5327. The returned recordset has the same prefetch object as ``self``.
  5328. """
  5329. assert isinstance(flag, bool)
  5330. if flag == self.env.su:
  5331. return self
  5332. return self.with_env(self.env(su=flag))
  5333. def with_user(self, user) -> Self:
  5334. """ with_user(user)
  5335. Return a new version of this recordset attached to the given user, in
  5336. non-superuser mode, unless `user` is the superuser (by convention, the
  5337. superuser is always in superuser mode.)
  5338. """
  5339. if not user:
  5340. return self
  5341. return self.with_env(self.env(user=user, su=False))
  5342. def with_company(self, company) -> Self:
  5343. """ with_company(company)
  5344. Return a new version of this recordset with a modified context, such that::
  5345. result.env.company = company
  5346. result.env.companies = self.env.companies | company
  5347. :param company: main company of the new environment.
  5348. :type company: :class:`~odoo.addons.base.models.res_company` or int
  5349. .. warning::
  5350. When using an unauthorized company for current user,
  5351. accessing the company(ies) on the environment may trigger
  5352. an AccessError if not done in a sudoed environment.
  5353. """
  5354. if not company:
  5355. # With company = None/False/0/[]/empty recordset: keep current environment
  5356. return self
  5357. company_id = int(company)
  5358. allowed_company_ids = self.env.context.get('allowed_company_ids') or []
  5359. if allowed_company_ids and company_id == allowed_company_ids[0]:
  5360. return self
  5361. # Copy the allowed_company_ids list
  5362. # to avoid modifying the context of the current environment.
  5363. allowed_company_ids = list(allowed_company_ids)
  5364. if company_id in allowed_company_ids:
  5365. allowed_company_ids.remove(company_id)
  5366. allowed_company_ids.insert(0, company_id)
  5367. return self.with_context(allowed_company_ids=allowed_company_ids)
  5368. def with_context(self, *args, **kwargs) -> Self:
  5369. """ with_context([context][, **overrides]) -> Model
  5370. Returns a new version of this recordset attached to an extended
  5371. context.
  5372. The extended context is either the provided ``context`` in which
  5373. ``overrides`` are merged or the *current* context in which
  5374. ``overrides`` are merged e.g.::
  5375. # current context is {'key1': True}
  5376. r2 = records.with_context({}, key2=True)
  5377. # -> r2._context is {'key2': True}
  5378. r2 = records.with_context(key2=True)
  5379. # -> r2._context is {'key1': True, 'key2': True}
  5380. .. note:
  5381. The returned recordset has the same prefetch object as ``self``.
  5382. """ # noqa: RST210
  5383. if (args and 'force_company' in args[0]) or 'force_company' in kwargs:
  5384. _logger.warning(
  5385. "Context key 'force_company' is no longer supported. "
  5386. "Use with_company(company) instead.",
  5387. stack_info=True,
  5388. )
  5389. if (args and 'company' in args[0]) or 'company' in kwargs:
  5390. _logger.warning(
  5391. "Context key 'company' is not recommended, because "
  5392. "of its special meaning in @depends_context.",
  5393. stack_info=True,
  5394. )
  5395. context = dict(args[0] if args else self._context, **kwargs)
  5396. if 'allowed_company_ids' not in context and 'allowed_company_ids' in self._context:
  5397. # Force 'allowed_company_ids' to be kept when context is overridden
  5398. # without 'allowed_company_ids'
  5399. context['allowed_company_ids'] = self._context['allowed_company_ids']
  5400. return self.with_env(self.env(context=context))
  5401. def with_prefetch(self, prefetch_ids=None) -> Self:
  5402. """ with_prefetch([prefetch_ids]) -> records
  5403. Return a new version of this recordset that uses the given prefetch ids,
  5404. or ``self``'s ids if not given.
  5405. """
  5406. if prefetch_ids is None:
  5407. prefetch_ids = self._ids
  5408. return self.__class__(self.env, self._ids, prefetch_ids)
  5409. def _update_cache(self, values, validate=True):
  5410. """ Update the cache of ``self`` with ``values``.
  5411. :param values: dict of field values, in any format.
  5412. :param validate: whether values must be checked
  5413. """
  5414. self.ensure_one()
  5415. cache = self.env.cache
  5416. fields = self._fields
  5417. try:
  5418. field_values = [(fields[name], value) for name, value in values.items() if name != 'id']
  5419. except KeyError as e:
  5420. raise ValueError("Invalid field %r on model %r" % (e.args[0], self._name))
  5421. # convert monetary fields after other columns for correct value rounding
  5422. for field, value in sorted(field_values, key=lambda item: item[0].write_sequence):
  5423. value = field.convert_to_cache(value, self, validate)
  5424. cache.set(self, field, value, check_dirty=False)
  5425. # set inverse fields on new records in the comodel
  5426. if field.relational:
  5427. inv_recs = self[field.name].filtered(lambda r: not r.id)
  5428. if not inv_recs:
  5429. continue
  5430. # we need to adapt the value of the inverse fields to integrate self into it:
  5431. # x2many fields should add self, while many2one fields should replace with self
  5432. for invf in self.pool.field_inverses[field]:
  5433. invf._update(inv_recs, self)
  5434. def _convert_to_record(self, values):
  5435. """ Convert the ``values`` dictionary from the cache format to the
  5436. record format.
  5437. """
  5438. return {
  5439. name: self._fields[name].convert_to_record(value, self)
  5440. for name, value in values.items()
  5441. }
  5442. def _convert_to_write(self, values):
  5443. """ Convert the ``values`` dictionary into the format of :meth:`write`. """
  5444. fields = self._fields
  5445. result = {}
  5446. for name, value in values.items():
  5447. if name in fields:
  5448. field = fields[name]
  5449. value = field.convert_to_write(value, self)
  5450. if not isinstance(value, NewId):
  5451. result[name] = value
  5452. return result
  5453. #
  5454. # Record traversal and update
  5455. #
  5456. def _mapped_func(self, func):
  5457. """ Apply function ``func`` on all records in ``self``, and return the
  5458. result as a list or a recordset (if ``func`` returns recordsets).
  5459. """
  5460. if self:
  5461. vals = [func(rec) for rec in self]
  5462. if isinstance(vals[0], BaseModel):
  5463. return vals[0].union(*vals) # union of all recordsets
  5464. return vals
  5465. else:
  5466. vals = func(self)
  5467. return vals if isinstance(vals, BaseModel) else []
  5468. def mapped(self, func):
  5469. """Apply ``func`` on all records in ``self``, and return the result as a
  5470. list or a recordset (if ``func`` return recordsets). In the latter
  5471. case, the order of the returned recordset is arbitrary.
  5472. :param func: a function or a dot-separated sequence of field names
  5473. :type func: callable or str
  5474. :return: self if func is falsy, result of func applied to all ``self`` records.
  5475. :rtype: list or recordset
  5476. .. code-block:: python3
  5477. # returns a list of summing two fields for each record in the set
  5478. records.mapped(lambda r: r.field1 + r.field2)
  5479. The provided function can be a string to get field values:
  5480. .. code-block:: python3
  5481. # returns a list of names
  5482. records.mapped('name')
  5483. # returns a recordset of partners
  5484. records.mapped('partner_id')
  5485. # returns the union of all partner banks, with duplicates removed
  5486. records.mapped('partner_id.bank_ids')
  5487. """
  5488. if not func:
  5489. return self # support for an empty path of fields
  5490. if isinstance(func, str):
  5491. recs = self
  5492. for name in func.split('.'):
  5493. recs = recs._fields[name].mapped(recs)
  5494. return recs
  5495. else:
  5496. return self._mapped_func(func)
  5497. def filtered(self, func) -> Self:
  5498. """Return the records in ``self`` satisfying ``func``.
  5499. :param func: a function or a dot-separated sequence of field names
  5500. :type func: callable or str
  5501. :return: recordset of records satisfying func, may be empty.
  5502. .. code-block:: python3
  5503. # only keep records whose company is the current user's
  5504. records.filtered(lambda r: r.company_id == user.company_id)
  5505. # only keep records whose partner is a company
  5506. records.filtered("partner_id.is_company")
  5507. """
  5508. if isinstance(func, str):
  5509. if '.' in func:
  5510. return self.browse(rec.id for rec in self if any(rec.mapped(func)))
  5511. else: # Avoid costly mapped
  5512. return self.browse(rec.id for rec in self if rec[func])
  5513. return self.browse(rec.id for rec in self if func(rec))
  5514. def grouped(self, key):
  5515. """Eagerly groups the records of ``self`` by the ``key``, returning a
  5516. dict from the ``key``'s result to recordsets. All the resulting
  5517. recordsets are guaranteed to be part of the same prefetch-set.
  5518. Provides a convenience method to partition existing recordsets without
  5519. the overhead of a :meth:`~.read_group`, but performs no aggregation.
  5520. .. note:: unlike :func:`itertools.groupby`, does not care about input
  5521. ordering, however the tradeoff is that it can not be lazy
  5522. :param key: either a callable from a :class:`Model` to a (hashable)
  5523. value, or a field name. In the latter case, it is equivalent
  5524. to ``itemgetter(key)`` (aka the named field's value)
  5525. :type key: callable | str
  5526. :rtype: dict
  5527. """
  5528. if isinstance(key, str):
  5529. key = itemgetter(key)
  5530. collator = defaultdict(list)
  5531. for record in self:
  5532. collator[key(record)].extend(record._ids)
  5533. browse = functools.partial(type(self), self.env, prefetch_ids=self._prefetch_ids)
  5534. return {key: browse(tuple(ids)) for key, ids in collator.items()}
  5535. def filtered_domain(self, domain) -> Self:
  5536. """Return the records in ``self`` satisfying the domain and keeping the same order.
  5537. :param domain: :ref:`A search domain <reference/orm/domains>`.
  5538. """
  5539. if not domain or not self:
  5540. return self
  5541. stack = []
  5542. for leaf in reversed(domain):
  5543. if leaf == '|':
  5544. stack.append(stack.pop() | stack.pop())
  5545. elif leaf == '!':
  5546. stack.append(set(self._ids) - stack.pop())
  5547. elif leaf == '&':
  5548. stack.append(stack.pop() & stack.pop())
  5549. elif leaf == expression.TRUE_LEAF:
  5550. stack.append(set(self._ids))
  5551. elif leaf == expression.FALSE_LEAF:
  5552. stack.append(set())
  5553. else:
  5554. (key, comparator, value) = leaf
  5555. if comparator in ('child_of', 'parent_of'):
  5556. if key in ['company_id', 'company_ids']: # avoid an explicit search
  5557. value_companies = self.env['res.company'].browse(value)
  5558. if comparator == 'child_of':
  5559. stack.append({record.id for record in self if record[key].parent_ids & value_companies})
  5560. else:
  5561. stack.append({record.id for record in self if record[key] & value_companies.parent_ids})
  5562. else:
  5563. stack.append(set(self.with_context(active_test=False).search([('id', 'in', self.ids), leaf], order='id')._ids))
  5564. continue
  5565. # determine the field with the final type for values
  5566. if key.endswith('.id'):
  5567. key = key[:-3]
  5568. if '.' in key:
  5569. fname, rest = key.split('.', 1)
  5570. field = self._fields[fname]
  5571. if field.relational:
  5572. # for relational fields, evaluate as 'any'
  5573. # so that negations are applied on the result of 'any' instead
  5574. # of on the mapped value
  5575. key, comparator, value = fname, 'any', [(rest, comparator, value)]
  5576. else:
  5577. field = self._fields[key]
  5578. if key == 'id':
  5579. key = ''
  5580. if comparator in ('like', 'ilike', '=like', '=ilike', 'not ilike', 'not like'):
  5581. if comparator.endswith('ilike'):
  5582. # ilike uses unaccent and lower-case comparison
  5583. # we may get something which is not a string
  5584. def unaccent(x):
  5585. return self.pool.unaccent_python(str(x).lower()) if x else ''
  5586. else:
  5587. def unaccent(x):
  5588. return str(x) if x else ''
  5589. # build a regex that matches the SQL-like expression
  5590. # note that '\' is used for escaping in SQL
  5591. def build_like_regex(value: str, exact: bool):
  5592. yield '^' if exact else '.*'
  5593. escaped = False
  5594. for char in value:
  5595. if escaped:
  5596. escaped = False
  5597. yield re.escape(char)
  5598. elif char == '\\':
  5599. escaped = True
  5600. elif char == '%':
  5601. yield '.*'
  5602. elif char == '_':
  5603. yield '.'
  5604. else:
  5605. yield re.escape(char)
  5606. if exact:
  5607. yield '$'
  5608. # no need to match r'.*' in else because we only use .match()
  5609. like_regex = re.compile("".join(build_like_regex(unaccent(value), comparator.startswith("="))))
  5610. if comparator in ('=', '!=') and field.type in ('char', 'text', 'html') and not value:
  5611. # use the comparator 'in' for falsy comparison of strings
  5612. comparator = 'in' if comparator == '=' else 'not in'
  5613. value = ['', False]
  5614. if comparator in ('in', 'not in'):
  5615. if isinstance(value, (list, tuple)):
  5616. value = set(value)
  5617. else:
  5618. value = {value}
  5619. if field.type in ('date', 'datetime'):
  5620. value = {Datetime.to_datetime(v) for v in value}
  5621. elif field.type in ('char', 'text', 'html') and ({False, ""} & value):
  5622. # compare string to both False and ""
  5623. value |= {False, ""}
  5624. elif field.type in ('date', 'datetime'):
  5625. value = Datetime.to_datetime(value)
  5626. matching_ids = set()
  5627. for record in self:
  5628. data = record.mapped(key)
  5629. if isinstance(data, BaseModel) and comparator not in ('any', 'not any'):
  5630. v = value
  5631. if isinstance(value, (list, tuple, set)) and value:
  5632. v = next(iter(value))
  5633. if isinstance(v, str):
  5634. try:
  5635. data = data.mapped('display_name')
  5636. except AccessError:
  5637. # failed to access the record, return empty string for comparison
  5638. data = ['']
  5639. else:
  5640. data = data and data.ids or [False]
  5641. elif field.type in ('date', 'datetime'):
  5642. data = [Datetime.to_datetime(d) for d in data]
  5643. if comparator == '=':
  5644. ok = value in data
  5645. elif comparator == '!=':
  5646. ok = value not in data
  5647. elif comparator == '=?':
  5648. ok = not value or (value in data)
  5649. elif comparator == 'in':
  5650. ok = value and any(x in value for x in data)
  5651. elif comparator == 'not in':
  5652. ok = not (value and any(x in value for x in data))
  5653. elif comparator == '<':
  5654. ok = any(x is not False and x is not None and x < value for x in data)
  5655. elif comparator == '>':
  5656. ok = any(x is not False and x is not None and x > value for x in data)
  5657. elif comparator == '<=':
  5658. ok = any(x is not False and x is not None and x <= value for x in data)
  5659. elif comparator == '>=':
  5660. ok = any(x is not False and x is not None and x >= value for x in data)
  5661. elif comparator in ('like', 'ilike', '=like', '=ilike', 'not ilike', 'not like'):
  5662. ok = any(like_regex.match(unaccent(x)) for x in data)
  5663. if comparator.startswith('not'):
  5664. ok = not ok
  5665. elif comparator == 'any':
  5666. ok = data.filtered_domain(value)
  5667. elif comparator == 'not any':
  5668. ok = not data.filtered_domain(value)
  5669. else:
  5670. raise ValueError(f"Invalid term domain '{leaf}', operator '{comparator}' doesn't exist.")
  5671. if ok:
  5672. matching_ids.add(record.id)
  5673. stack.append(matching_ids)
  5674. while len(stack) > 1:
  5675. stack.append(stack.pop() & stack.pop())
  5676. [result_ids] = stack
  5677. return self.browse(id_ for id_ in self._ids if id_ in result_ids)
  5678. def sorted(self, key=None, reverse=False) -> Self:
  5679. """Return the recordset ``self`` ordered by ``key``.
  5680. :param key: either a function of one argument that returns a
  5681. comparison key for each record, or a field name, or ``None``, in
  5682. which case records are ordered according the default model's order
  5683. :type key: callable or str or None
  5684. :param bool reverse: if ``True``, return the result in reverse order
  5685. .. code-block:: python3
  5686. # sort records by name
  5687. records.sorted(key=lambda r: r.name)
  5688. """
  5689. if key is None:
  5690. if any(self._ids):
  5691. ids = self.search([('id', 'in', self.ids)])._ids
  5692. else: # Don't support new ids because search() doesn't work on new records
  5693. ids = self._ids
  5694. ids = tuple(reversed(ids)) if reverse else ids
  5695. else:
  5696. if isinstance(key, str):
  5697. key = itemgetter(key)
  5698. ids = tuple(item.id for item in sorted(self, key=key, reverse=reverse))
  5699. return self.__class__(self.env, ids, self._prefetch_ids)
  5700. def update(self, values):
  5701. """ Update the records in ``self`` with ``values``. """
  5702. for name, value in values.items():
  5703. self[name] = value
  5704. def flush_model(self, fnames=None):
  5705. """ Process the pending computations and database updates on ``self``'s
  5706. model. When the parameter is given, the method guarantees that at least
  5707. the given fields are flushed to the database. More fields can be
  5708. flushed, though.
  5709. :param fnames: optional iterable of field names to flush
  5710. """
  5711. self._recompute_model(fnames)
  5712. self._flush(fnames)
  5713. def flush_recordset(self, fnames=None):
  5714. """ Process the pending computations and database updates on the records
  5715. ``self``. When the parameter is given, the method guarantees that at
  5716. least the given fields on records ``self`` are flushed to the database.
  5717. More fields and records can be flushed, though.
  5718. :param fnames: optional iterable of field names to flush
  5719. """
  5720. self._recompute_recordset(fnames)
  5721. fields_ = None if fnames is None else (self._fields[fname] for fname in fnames)
  5722. if self.env.cache.has_dirty_fields(self, fields_):
  5723. self._flush(fnames)
  5724. def _flush(self, fnames=None):
  5725. if fnames is None:
  5726. fields = self._fields.values()
  5727. else:
  5728. fields = [self._fields[fname] for fname in fnames]
  5729. dirty_fields = self.env.cache.get_dirty_fields()
  5730. if not any(field in dirty_fields for field in fields):
  5731. return
  5732. # if any field is context-dependent, the values to flush should
  5733. # be found with a context where the context keys are all None
  5734. model = self.with_context({})
  5735. # pop dirty fields and their corresponding record ids from cache
  5736. dirty_field_ids = {
  5737. field: self.env.cache.clear_dirty_field(field)
  5738. for field in model._fields.values()
  5739. if field in dirty_fields
  5740. }
  5741. # Memory optimization: get a reference to each dirty field's cache.
  5742. # This avoids allocating extra memory for storing the data taken
  5743. # from cache. Beware that this breaks the cache abstraction!
  5744. dirty_field_cache = {
  5745. field: (
  5746. self.env.cache._get_field_cache(model, field)
  5747. if not field.company_dependent else
  5748. self.env.cache._get_grouped_company_dependent_field_cache(field)
  5749. )
  5750. for field in dirty_field_ids
  5751. }
  5752. # sort dirty record ids so that records with the same set of modified
  5753. # fields are grouped together; for that purpose, map each dirty id to
  5754. # an integer that represents its subset of dirty fields (bitmask)
  5755. dirty_ids = sorted(
  5756. OrderedSet(id_ for ids in dirty_field_ids.values() for id_ in ids),
  5757. key=lambda id_: sum(
  5758. 2 ** field_index
  5759. for field_index, ids in enumerate(dirty_field_ids.values())
  5760. if id_ in ids
  5761. ),
  5762. )
  5763. # perform updates in batches in order to limit memory footprint
  5764. BATCH_SIZE = 1000
  5765. for some_ids in split_every(BATCH_SIZE, dirty_ids):
  5766. vals_list = []
  5767. try:
  5768. for id_ in some_ids:
  5769. record = model.browse(id_)
  5770. vals_list.append({
  5771. f.name: f.convert_to_column_update(dirty_field_cache[f][id_], record)
  5772. for f, ids in dirty_field_ids.items()
  5773. if id_ in ids
  5774. })
  5775. except KeyError:
  5776. raise AssertionError(
  5777. f"Could not find all values of {record} to flush them\n"
  5778. f" Context: {self.env.context}\n"
  5779. f" Cache: {self.env.cache!r}"
  5780. )
  5781. model.browse(some_ids)._write_multi(vals_list)
  5782. #
  5783. # New records - represent records that do not exist in the database yet;
  5784. # they are used to perform onchanges.
  5785. #
  5786. @api.model
  5787. def new(self, values=None, origin=None, ref=None) -> Self:
  5788. """ new([values], [origin], [ref]) -> record
  5789. Return a new record instance attached to the current environment and
  5790. initialized with the provided ``value``. The record is *not* created
  5791. in database, it only exists in memory.
  5792. One can pass an ``origin`` record, which is the actual record behind the
  5793. result. It is retrieved as ``record._origin``. Two new records with the
  5794. same origin record are considered equal.
  5795. One can also pass a ``ref`` value to identify the record among other new
  5796. records. The reference is encapsulated in the ``id`` of the record.
  5797. """
  5798. if values is None:
  5799. values = {}
  5800. if origin is not None:
  5801. origin = origin.id
  5802. record = self.browse((NewId(origin, ref),))
  5803. record._update_cache(values, validate=False)
  5804. return record
  5805. @property
  5806. def _origin(self) -> Self:
  5807. """ Return the actual records corresponding to ``self``. """
  5808. ids = tuple(origin_ids(self._ids))
  5809. prefetch_ids = OriginIds(self._prefetch_ids)
  5810. return self.__class__(self.env, ids, prefetch_ids)
  5811. #
  5812. # "Dunder" methods
  5813. #
  5814. def __bool__(self):
  5815. """ Test whether ``self`` is nonempty. """
  5816. return True if self._ids else False # fast version of bool(self._ids)
  5817. def __len__(self):
  5818. """ Return the size of ``self``. """
  5819. return len(self._ids)
  5820. def __iter__(self) -> typing.Iterator[Self]:
  5821. """ Return an iterator over ``self``. """
  5822. if len(self._ids) > PREFETCH_MAX and self._prefetch_ids is self._ids:
  5823. for ids in self.env.cr.split_for_in_conditions(self._ids):
  5824. for id_ in ids:
  5825. yield self.__class__(self.env, (id_,), ids)
  5826. else:
  5827. for id_ in self._ids:
  5828. yield self.__class__(self.env, (id_,), self._prefetch_ids)
  5829. def __reversed__(self) -> typing.Iterator[Self]:
  5830. """ Return an reversed iterator over ``self``. """
  5831. if len(self._ids) > PREFETCH_MAX and self._prefetch_ids is self._ids:
  5832. for ids in self.env.cr.split_for_in_conditions(reversed(self._ids)):
  5833. for id_ in ids:
  5834. yield self.__class__(self.env, (id_,), ids)
  5835. elif self._ids:
  5836. prefetch_ids = ReversedIterable(self._prefetch_ids)
  5837. for id_ in reversed(self._ids):
  5838. yield self.__class__(self.env, (id_,), prefetch_ids)
  5839. def __contains__(self, item):
  5840. """ Test whether ``item`` (record or field name) is an element of ``self``.
  5841. In the first case, the test is fully equivalent to::
  5842. any(item == record for record in self)
  5843. """
  5844. try:
  5845. if self._name == item._name:
  5846. return len(item) == 1 and item.id in self._ids
  5847. raise TypeError(f"inconsistent models in: {item} in {self}")
  5848. except AttributeError:
  5849. if isinstance(item, str):
  5850. return item in self._fields
  5851. raise TypeError(f"unsupported operand types in: {item!r} in {self}")
  5852. def __add__(self, other) -> Self:
  5853. """ Return the concatenation of two recordsets. """
  5854. return self.concat(other)
  5855. def concat(self, *args) -> Self:
  5856. """ Return the concatenation of ``self`` with all the arguments (in
  5857. linear time complexity).
  5858. """
  5859. ids = list(self._ids)
  5860. for arg in args:
  5861. try:
  5862. if arg._name != self._name:
  5863. raise TypeError(f"inconsistent models in: {self} + {arg}")
  5864. ids.extend(arg._ids)
  5865. except AttributeError:
  5866. raise TypeError(f"unsupported operand types in: {self} + {arg!r}")
  5867. return self.browse(ids)
  5868. def __sub__(self, other) -> Self:
  5869. """ Return the recordset of all the records in ``self`` that are not in
  5870. ``other``. Note that recordset order is preserved.
  5871. """
  5872. try:
  5873. if self._name != other._name:
  5874. raise TypeError(f"inconsistent models in: {self} - {other}")
  5875. other_ids = set(other._ids)
  5876. return self.browse([id for id in self._ids if id not in other_ids])
  5877. except AttributeError:
  5878. raise TypeError(f"unsupported operand types in: {self} - {other!r}")
  5879. def __and__(self, other) -> Self:
  5880. """ Return the intersection of two recordsets.
  5881. Note that first occurrence order is preserved.
  5882. """
  5883. try:
  5884. if self._name != other._name:
  5885. raise TypeError(f"inconsistent models in: {self} & {other}")
  5886. other_ids = set(other._ids)
  5887. return self.browse(OrderedSet(id for id in self._ids if id in other_ids))
  5888. except AttributeError:
  5889. raise TypeError(f"unsupported operand types in: {self} & {other!r}")
  5890. def __or__(self, other) -> Self:
  5891. """ Return the union of two recordsets.
  5892. Note that first occurrence order is preserved.
  5893. """
  5894. return self.union(other)
  5895. def union(self, *args) -> Self:
  5896. """ Return the union of ``self`` with all the arguments (in linear time
  5897. complexity, with first occurrence order preserved).
  5898. """
  5899. ids = list(self._ids)
  5900. for arg in args:
  5901. try:
  5902. if arg._name != self._name:
  5903. raise TypeError(f"inconsistent models in: {self} | {arg}")
  5904. ids.extend(arg._ids)
  5905. except AttributeError:
  5906. raise TypeError(f"unsupported operand types in: {self} | {arg!r}")
  5907. return self.browse(OrderedSet(ids))
  5908. def __eq__(self, other):
  5909. """ Test whether two recordsets are equivalent (up to reordering). """
  5910. try:
  5911. return self._name == other._name and set(self._ids) == set(other._ids)
  5912. except AttributeError:
  5913. if other:
  5914. warnings.warn(f"unsupported operand type(s) for \"==\": '{self._name}()' == '{other!r}'", stacklevel=2)
  5915. return NotImplemented
  5916. def __lt__(self, other):
  5917. try:
  5918. if self._name == other._name:
  5919. return set(self._ids) < set(other._ids)
  5920. except AttributeError:
  5921. pass
  5922. return NotImplemented
  5923. def __le__(self, other):
  5924. try:
  5925. if self._name == other._name:
  5926. # these are much cheaper checks than a proper subset check, so
  5927. # optimise for checking if a null or singleton are subsets of a
  5928. # recordset
  5929. if not self or self in other:
  5930. return True
  5931. return set(self._ids) <= set(other._ids)
  5932. except AttributeError:
  5933. pass
  5934. return NotImplemented
  5935. def __gt__(self, other):
  5936. try:
  5937. if self._name == other._name:
  5938. return set(self._ids) > set(other._ids)
  5939. except AttributeError:
  5940. pass
  5941. return NotImplemented
  5942. def __ge__(self, other):
  5943. try:
  5944. if self._name == other._name:
  5945. if not other or other in self:
  5946. return True
  5947. return set(self._ids) >= set(other._ids)
  5948. except AttributeError:
  5949. pass
  5950. return NotImplemented
  5951. def __int__(self):
  5952. return self.id or 0
  5953. def __repr__(self):
  5954. return f"{self._name}{self._ids!r}"
  5955. def __hash__(self):
  5956. return hash((self._name, frozenset(self._ids)))
  5957. @typing.overload
  5958. def __getitem__(self, key: int | slice) -> Self: ...
  5959. @typing.overload
  5960. def __getitem__(self, key: str) -> typing.Any: ...
  5961. def __getitem__(self, key):
  5962. """ If ``key`` is an integer or a slice, return the corresponding record
  5963. selection as an instance (attached to ``self.env``).
  5964. Otherwise read the field ``key`` of the first record in ``self``.
  5965. Examples::
  5966. inst = model.search(dom) # inst is a recordset
  5967. r4 = inst[3] # fourth record in inst
  5968. rs = inst[10:20] # subset of inst
  5969. nm = rs['name'] # name of first record in inst
  5970. """
  5971. if isinstance(key, str):
  5972. # important: one must call the field's getter
  5973. return self._fields[key].__get__(self)
  5974. elif isinstance(key, slice):
  5975. return self.browse(self._ids[key])
  5976. else:
  5977. return self.browse((self._ids[key],))
  5978. def __setitem__(self, key, value):
  5979. """ Assign the field ``key`` to ``value`` in record ``self``. """
  5980. # important: one must call the field's setter
  5981. return self._fields[key].__set__(self, value)
  5982. #
  5983. # Cache and recomputation management
  5984. #
  5985. @property
  5986. def _cache(self):
  5987. """ Return the cache of ``self``, mapping field names to values. """
  5988. return RecordCache(self)
  5989. def _in_cache_without(self, field, limit=PREFETCH_MAX):
  5990. """ Return records to prefetch that have no value in cache for ``field``
  5991. (:class:`Field` instance), including ``self``.
  5992. Return at most ``limit`` records.
  5993. """
  5994. ids = expand_ids(self.id, self._prefetch_ids)
  5995. ids = self.env.cache.get_missing_ids(self.browse(ids), field)
  5996. if limit:
  5997. ids = itertools.islice(ids, limit)
  5998. # Those records are aimed at being either fetched, or computed. But the
  5999. # method '_fetch_field' is not correct with new records: it considers
  6000. # them as forbidden records, and clears their cache! On the other hand,
  6001. # compute methods are not invoked with a mix of real and new records for
  6002. # the sake of code simplicity.
  6003. return self.browse(ids)
  6004. def invalidate_model(self, fnames=None, flush=True):
  6005. """ Invalidate the cache of all records of ``self``'s model, when the
  6006. cached values no longer correspond to the database values. If the
  6007. parameter is given, only the given fields are invalidated from cache.
  6008. :param fnames: optional iterable of field names to invalidate
  6009. :param flush: whether pending updates should be flushed before invalidation.
  6010. It is ``True`` by default, which ensures cache consistency.
  6011. Do not use this parameter unless you know what you are doing.
  6012. """
  6013. if flush:
  6014. self.flush_model(fnames)
  6015. self._invalidate_cache(fnames)
  6016. def invalidate_recordset(self, fnames=None, flush=True):
  6017. """ Invalidate the cache of the records in ``self``, when the cached
  6018. values no longer correspond to the database values. If the parameter
  6019. is given, only the given fields on ``self`` are invalidated from cache.
  6020. :param fnames: optional iterable of field names to invalidate
  6021. :param flush: whether pending updates should be flushed before invalidation.
  6022. It is ``True`` by default, which ensures cache consistency.
  6023. Do not use this parameter unless you know what you are doing.
  6024. """
  6025. if flush:
  6026. self.flush_recordset(fnames)
  6027. self._invalidate_cache(fnames, self._ids)
  6028. def _invalidate_cache(self, fnames=None, ids=None):
  6029. if fnames is None:
  6030. fields = self._fields.values()
  6031. else:
  6032. fields = [self._fields[fname] for fname in fnames]
  6033. spec = []
  6034. for field in fields:
  6035. spec.append((field, ids))
  6036. # TODO VSC: used to remove the inverse of many_to_one from the cache, though we might not need it anymore
  6037. for invf in self.pool.field_inverses[field]:
  6038. self.env[invf.model_name].flush_model([invf.name])
  6039. spec.append((invf, None))
  6040. self.env.cache.invalidate(spec)
  6041. def modified(self, fnames, create=False, before=False):
  6042. """ Notify that fields will be or have been modified on ``self``. This
  6043. invalidates the cache where necessary, and prepares the recomputation of
  6044. dependent stored fields.
  6045. :param fnames: iterable of field names modified on records ``self``
  6046. :param create: whether called in the context of record creation
  6047. :param before: whether called before modifying records ``self``
  6048. """
  6049. if not self or not fnames:
  6050. return
  6051. # The triggers of a field F is a tree that contains the fields that
  6052. # depend on F, together with the fields to inverse to find out which
  6053. # records to recompute.
  6054. #
  6055. # For instance, assume that G depends on F, H depends on X.F, I depends
  6056. # on W.X.F, and J depends on Y.F. The triggers of F will be the tree:
  6057. #
  6058. # [G]
  6059. # X/ \Y
  6060. # [H] [J]
  6061. # W/
  6062. # [I]
  6063. #
  6064. # This tree provides perfect support for the trigger mechanism:
  6065. # when F is # modified on records,
  6066. # - mark G to recompute on records,
  6067. # - mark H to recompute on inverse(X, records),
  6068. # - mark I to recompute on inverse(W, inverse(X, records)),
  6069. # - mark J to recompute on inverse(Y, records).
  6070. if before:
  6071. # When called before modification, we should determine what
  6072. # currently depends on self, and it should not be recomputed before
  6073. # the modification. So we only collect what should be marked for
  6074. # recomputation.
  6075. marked = self.env.transaction.tocompute # {field: ids}
  6076. tomark = defaultdict(OrderedSet) # {field: ids}
  6077. else:
  6078. # When called after modification, one should traverse backwards
  6079. # dependencies by taking into account all fields already known to
  6080. # be recomputed. In that case, we mark fieds to compute as soon as
  6081. # possible.
  6082. marked = {}
  6083. tomark = self.env.transaction.tocompute
  6084. # determine what to trigger (with iterators)
  6085. todo = [self._modified([self._fields[fname] for fname in fnames], create)]
  6086. # process what to trigger by lazily chaining todo
  6087. for field, records, create in itertools.chain.from_iterable(todo):
  6088. records -= self.env.protected(field)
  6089. if not records:
  6090. continue
  6091. if field.recursive:
  6092. # discard already processed records, in order to avoid cycles
  6093. if field.compute and field.store:
  6094. ids = (marked.get(field) or set()) | (tomark.get(field) or set())
  6095. records = records.browse(id_ for id_ in records._ids if id_ not in ids)
  6096. else:
  6097. records = records & self.env.cache.get_records(records, field, all_contexts=True)
  6098. if not records:
  6099. continue
  6100. # recursively trigger recomputation of field's dependents
  6101. todo.append(records._modified([field], create))
  6102. # mark for recomputation (now or later, depending on 'before')
  6103. if field.compute and field.store:
  6104. tomark[field].update(records._ids)
  6105. else:
  6106. # Don't force the recomputation of compute fields which are
  6107. # not stored as this is not really necessary.
  6108. self.env.cache.invalidate([(field, records._ids)])
  6109. if before:
  6110. # effectively mark for recomputation now
  6111. for field, ids in tomark.items():
  6112. records = self.env[field.model_name].browse(ids)
  6113. self.env.add_to_compute(field, records)
  6114. def _modified(self, fields, create):
  6115. """ Return an iterator traversing a tree of field triggers on ``self``,
  6116. traversing backwards field dependencies along the way, and yielding
  6117. tuple ``(field, records, created)`` to recompute.
  6118. """
  6119. cache = self.env.cache
  6120. # The fields' trigger trees are merged in order to evaluate all triggers
  6121. # at once. For non-stored computed fields, `_modified_triggers` might
  6122. # traverse the tree (at the cost of extra queries) only to know which
  6123. # records to invalidate in cache. But in many cases, most of these
  6124. # fields have no data in cache, so they can be ignored from the start.
  6125. # This allows us to discard subtrees from the merged tree when they
  6126. # only contain such fields.
  6127. def select(field):
  6128. return (field.compute and field.store) or cache.contains_field(field)
  6129. tree = self.pool.get_trigger_tree(fields, select=select)
  6130. if not tree:
  6131. return ()
  6132. return self.sudo().with_context(active_test=False)._modified_triggers(tree, create)
  6133. def _modified_triggers(self, tree, create=False):
  6134. """ Return an iterator traversing a tree of field triggers on ``self``,
  6135. traversing backwards field dependencies along the way, and yielding
  6136. tuple ``(field, records, created)`` to recompute.
  6137. """
  6138. if not self:
  6139. return
  6140. # first yield what to compute
  6141. for field in tree.root:
  6142. yield field, self, create
  6143. # then traverse dependencies backwards, and proceed recursively
  6144. for field, subtree in tree.items():
  6145. if create and field.type in ('many2one', 'many2one_reference'):
  6146. # upon creation, no other record has a reference to self
  6147. continue
  6148. # subtree is another tree of dependencies
  6149. model = self.env[field.model_name]
  6150. for invf in model.pool.field_inverses[field]:
  6151. # use an inverse of field without domain
  6152. if not (invf.type in ('one2many', 'many2many') and invf.domain):
  6153. if invf.type == 'many2one_reference':
  6154. rec_ids = OrderedSet()
  6155. for rec in self:
  6156. try:
  6157. if rec[invf.model_field] == field.model_name:
  6158. rec_ids.add(rec[invf.name])
  6159. except MissingError:
  6160. continue
  6161. records = model.browse(rec_ids)
  6162. else:
  6163. try:
  6164. records = self[invf.name]
  6165. except MissingError:
  6166. records = self.exists()[invf.name]
  6167. # TODO: find a better fix
  6168. if field.model_name == records._name:
  6169. if not any(self._ids):
  6170. # if self are new, records should be new as well
  6171. records = records.browse(it and NewId(it) for it in records._ids)
  6172. break
  6173. else:
  6174. new_records = self.filtered(lambda r: not r.id)
  6175. real_records = self - new_records
  6176. records = model.browse()
  6177. if real_records:
  6178. records = model.search([(field.name, 'in', real_records.ids)], order='id')
  6179. if new_records:
  6180. cache_records = self.env.cache.get_records(model, field)
  6181. new_ids = set(self._ids)
  6182. records |= cache_records.filtered(lambda r: not set(r[field.name]._ids).isdisjoint(new_ids))
  6183. yield from records._modified_triggers(subtree)
  6184. def _recompute_model(self, fnames=None):
  6185. """ Process the pending computations of the fields of ``self``'s model.
  6186. :param fnames: optional iterable of field names to compute
  6187. """
  6188. if fnames is None:
  6189. fields = self._fields.values()
  6190. else:
  6191. fields = [self._fields[fname] for fname in fnames]
  6192. for field in fields:
  6193. if field.compute and field.store:
  6194. self._recompute_field(field)
  6195. def _recompute_recordset(self, fnames=None):
  6196. """ Process the pending computations of the fields of the records in ``self``.
  6197. :param fnames: optional iterable of field names to compute
  6198. """
  6199. if fnames is None:
  6200. fields = self._fields.values()
  6201. else:
  6202. fields = [self._fields[fname] for fname in fnames]
  6203. for field in fields:
  6204. if field.compute and field.store:
  6205. self._recompute_field(field, self._ids)
  6206. def _recompute_field(self, field, ids=None):
  6207. ids_to_compute = self.env.transaction.tocompute.get(field, ())
  6208. if ids is None:
  6209. ids = ids_to_compute
  6210. else:
  6211. ids = [id_ for id_ in ids if id_ in ids_to_compute]
  6212. if not ids:
  6213. return
  6214. # do not force recomputation on new records; those will be
  6215. # recomputed by accessing the field on the records
  6216. records = self.browse(tuple(id_ for id_ in ids if id_))
  6217. field.recompute(records)
  6218. #
  6219. # Generic onchange method
  6220. #
  6221. def _has_onchange(self, field, other_fields):
  6222. """ Return whether ``field`` should trigger an onchange event in the
  6223. presence of ``other_fields``.
  6224. """
  6225. return (field.name in self._onchange_methods) or any(
  6226. dep in other_fields
  6227. for dep in self.pool.get_dependent_fields(field.base_field)
  6228. )
  6229. def _apply_onchange_methods(self, field_name, result):
  6230. """ Apply onchange method(s) for field ``field_name`` on ``self``. Value
  6231. assignments are applied on ``self``, while warning messages are put
  6232. in dictionary ``result``.
  6233. """
  6234. for method in self._onchange_methods.get(field_name, ()):
  6235. res = method(self)
  6236. if not res:
  6237. continue
  6238. if res.get('value'):
  6239. for key, val in res['value'].items():
  6240. if key in self._fields and key != 'id':
  6241. self[key] = val
  6242. if res.get('warning'):
  6243. result['warnings'].add((
  6244. res['warning'].get('title') or _("Warning"),
  6245. res['warning'].get('message') or "",
  6246. res['warning'].get('type') or "",
  6247. ))
  6248. def onchange(self, values: dict, field_names: list[str], fields_spec: dict):
  6249. raise NotImplementedError("onchange() is implemented in module 'web'")
  6250. def _get_placeholder_filename(self, field):
  6251. """ Returns the filename of the placeholder to use,
  6252. set on web/static/img by default, or the
  6253. complete path to access it (eg: module/path/to/image.png).
  6254. """
  6255. return False
  6256. collections.abc.Set.register(BaseModel)
  6257. # not exactly true as BaseModel doesn't have index or count
  6258. collections.abc.Sequence.register(BaseModel)
  6259. class RecordCache(MutableMapping):
  6260. """ A mapping from field names to values, to read and update the cache of a record. """
  6261. __slots__ = ['_record']
  6262. def __init__(self, record):
  6263. assert len(record) == 1, "Unexpected RecordCache(%s)" % record
  6264. self._record = record
  6265. def __contains__(self, name):
  6266. """ Return whether `record` has a cached value for field ``name``. """
  6267. field = self._record._fields[name]
  6268. return self._record.env.cache.contains(self._record, field)
  6269. def __getitem__(self, name):
  6270. """ Return the cached value of field ``name`` for `record`. """
  6271. field = self._record._fields[name]
  6272. return self._record.env.cache.get(self._record, field)
  6273. def __setitem__(self, name, value):
  6274. """ Assign the cached value of field ``name`` for ``record``. """
  6275. field = self._record._fields[name]
  6276. self._record.env.cache.set(self._record, field, value)
  6277. def __delitem__(self, name):
  6278. """ Remove the cached value of field ``name`` for ``record``. """
  6279. field = self._record._fields[name]
  6280. self._record.env.cache.remove(self._record, field)
  6281. def __iter__(self):
  6282. """ Iterate over the field names with a cached value. """
  6283. for field in self._record.env.cache.get_fields(self._record):
  6284. yield field.name
  6285. def __len__(self):
  6286. """ Return the number of fields with a cached value. """
  6287. return sum(1 for name in self)
  6288. AbstractModel = BaseModel
  6289. class Model(AbstractModel):
  6290. """ Main super-class for regular database-persisted Odoo models.
  6291. Odoo models are created by inheriting from this class::
  6292. class user(Model):
  6293. ...
  6294. The system will later instantiate the class once per database (on
  6295. which the class' module is installed).
  6296. """
  6297. _auto = True # automatically create database backend
  6298. _register = False # not visible in ORM registry, meant to be python-inherited only
  6299. _abstract = False # not abstract
  6300. _transient = False # not transient
  6301. class TransientModel(Model):
  6302. """ Model super-class for transient records, meant to be temporarily
  6303. persistent, and regularly vacuum-cleaned.
  6304. A TransientModel has a simplified access rights management, all users can
  6305. create new records, and may only access the records they created. The
  6306. superuser has unrestricted access to all TransientModel records.
  6307. """
  6308. _auto = True # automatically create database backend
  6309. _register = False # not visible in ORM registry, meant to be python-inherited only
  6310. _abstract = False # not abstract
  6311. _transient = True # transient
  6312. @api.autovacuum
  6313. def _transient_vacuum(self):
  6314. """Clean the transient records.
  6315. This unlinks old records from the transient model tables whenever the
  6316. :attr:`_transient_max_count` or :attr:`_transient_max_hours` conditions
  6317. (if any) are reached.
  6318. Actual cleaning will happen only once every 5 minutes. This means this
  6319. method can be called frequently (e.g. whenever a new record is created).
  6320. Example with both max_hours and max_count active:
  6321. Suppose max_hours = 0.2 (aka 12 minutes), max_count = 20, there are
  6322. 55 rows in the table, 10 created/changed in the last 5 minutes, an
  6323. additional 12 created/changed between 5 and 10 minutes ago, the rest
  6324. created/changed more than 12 minutes ago.
  6325. - age based vacuum will leave the 22 rows created/changed in the last 12
  6326. minutes
  6327. - count based vacuum will wipe out another 12 rows. Not just 2,
  6328. otherwise each addition would immediately cause the maximum to be
  6329. reached again.
  6330. - the 10 rows that have been created/changed the last 5 minutes will NOT
  6331. be deleted
  6332. """
  6333. if self._transient_max_hours:
  6334. # Age-based expiration
  6335. self._transient_clean_rows_older_than(self._transient_max_hours * 60 * 60)
  6336. if self._transient_max_count:
  6337. # Count-based expiration
  6338. self._transient_clean_old_rows(self._transient_max_count)
  6339. def _transient_clean_old_rows(self, max_count):
  6340. # Check how many rows we have in the table
  6341. self._cr.execute(SQL("SELECT count(*) FROM %s", SQL.identifier(self._table)))
  6342. [count] = self._cr.fetchone()
  6343. if count > max_count:
  6344. self._transient_clean_rows_older_than(300)
  6345. def _transient_clean_rows_older_than(self, seconds):
  6346. # Never delete rows used in last 5 minutes
  6347. seconds = max(seconds, 300)
  6348. self._cr.execute(SQL(
  6349. "SELECT id FROM %s WHERE %s < %s %s",
  6350. SQL.identifier(self._table),
  6351. SQL("COALESCE(write_date, create_date, (now() AT TIME ZONE 'UTC'))::timestamp"),
  6352. SQL("(now() AT TIME ZONE 'UTC') - interval %s", f"{seconds} seconds"),
  6353. SQL(f"LIMIT { GC_UNLINK_LIMIT }"),
  6354. ))
  6355. ids = [x[0] for x in self._cr.fetchall()]
  6356. self.sudo().browse(ids).unlink()
  6357. if len(ids) >= GC_UNLINK_LIMIT:
  6358. self.env.ref('base.autovacuum_job')._trigger()
  6359. def itemgetter_tuple(items):
  6360. """ Fixes itemgetter inconsistency (useful in some cases) of not returning
  6361. a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
  6362. """
  6363. if len(items) == 0:
  6364. return lambda a: ()
  6365. if len(items) == 1:
  6366. return lambda gettable: (gettable[items[0]],)
  6367. return operator.itemgetter(*items)
  6368. def convert_pgerror_not_null(model, fields, info, e):
  6369. env = model.env
  6370. if e.diag.table_name != model._table:
  6371. return {'message': env._("Missing required value for the field '%(name)s' on a linked model [%(linked_model)s]", name=e.diag.column_name, linked_model=e.diag.table_name)}
  6372. field_name = e.diag.column_name
  6373. field = fields[field_name]
  6374. message = env._("Missing required value for the field '%(name)s' (%(technical_name)s)", name=field['string'], technical_name=field_name)
  6375. return {
  6376. 'message': message,
  6377. 'field': field_name,
  6378. }
  6379. def convert_pgerror_unique(model, fields, info, e):
  6380. # new cursor since we're probably in an error handler in a blown
  6381. # transaction which may not have been rollbacked/cleaned yet
  6382. with closing(model.env.registry.cursor()) as cr_tmp:
  6383. cr_tmp.execute(SQL("""
  6384. SELECT
  6385. conname AS "constraint name",
  6386. t.relname AS "table name",
  6387. ARRAY(
  6388. SELECT attname FROM pg_attribute
  6389. WHERE attrelid = conrelid
  6390. AND attnum = ANY(conkey)
  6391. ) as "columns"
  6392. FROM pg_constraint
  6393. JOIN pg_class t ON t.oid = conrelid
  6394. WHERE conname = %s
  6395. """, e.diag.constraint_name))
  6396. constraint, table, ufields = cr_tmp.fetchone() or (None, None, None)
  6397. # if the unique constraint is on an expression or on an other table
  6398. if not ufields or model._table != table:
  6399. return {'message': tools.exception_to_unicode(e)}
  6400. # TODO: add stuff from e.diag.message_hint? provides details about the constraint & duplication values but may be localized...
  6401. if len(ufields) == 1:
  6402. field_name = ufields[0]
  6403. field = fields[field_name]
  6404. message = model.env._(
  6405. "The value for the field '%(field)s' already exists (this is probably '%(other_field)s' in the current model).",
  6406. field=field_name,
  6407. other_field=field['string'],
  6408. )
  6409. return {
  6410. 'message': message,
  6411. 'field': field_name,
  6412. }
  6413. field_strings = [fields[fname]['string'] for fname in ufields]
  6414. message = model.env._(
  6415. "The values for the fields '%(fields)s' already exist (they are probably '%(other_fields)s' in the current model).",
  6416. fields=format_list(model.env, ufields),
  6417. other_fields=format_list(model.env, field_strings),
  6418. )
  6419. return {
  6420. 'message': message,
  6421. # no field, unclear which one we should pick and they could be in any order
  6422. }
  6423. def convert_pgerror_constraint(model, fields, info, e):
  6424. sql_constraints = dict([(('%s_%s') % (e.diag.table_name, x[0]), x) for x in model._sql_constraints])
  6425. if e.diag.constraint_name in sql_constraints.keys():
  6426. return {'message': "'%s'" % sql_constraints[e.diag.constraint_name][2]}
  6427. return {'message': tools.exception_to_unicode(e)}
  6428. PGERROR_TO_OE = defaultdict(
  6429. # shape of mapped converters
  6430. lambda: (lambda model, fvg, info, pgerror: {'message': tools.exception_to_unicode(pgerror)}),
  6431. {
  6432. '23502': convert_pgerror_not_null,
  6433. '23505': convert_pgerror_unique,
  6434. '23514': convert_pgerror_constraint,
  6435. },
  6436. )
  6437. # keep those imports here to avoid dependency cycle errors
  6438. # pylint: disable=wrong-import-position
  6439. from . import fields
  6440. from .osv import expression
  6441. from .fields import Field, Datetime, Command
上海开阖软件有限公司 沪ICP备12045867号-1