gooderp18绿色标准版
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

858 line
31KB

  1. # Part of Odoo. See LICENSE file for full copyright and licensing details.
  2. """
  3. The PostgreSQL connector is a connectivity layer between the OpenERP code and
  4. the database, *not* a database abstraction toolkit. Database abstraction is what
  5. the ORM does, in fact.
  6. """
  7. from __future__ import annotations
  8. import logging
  9. import os
  10. import re
  11. import threading
  12. import time
  13. import typing
  14. import uuid
  15. from contextlib import contextmanager
  16. from datetime import datetime, timedelta
  17. from inspect import currentframe
  18. import psycopg2
  19. import psycopg2.extensions
  20. import psycopg2.extras
  21. from psycopg2.extensions import ISOLATION_LEVEL_REPEATABLE_READ
  22. from psycopg2.pool import PoolError
  23. from psycopg2.sql import Composable
  24. from werkzeug import urls
  25. import odoo
  26. from . import tools
  27. from .tools import SQL
  28. from .tools.func import frame_codeinfo, locked
  29. from .tools.misc import Callbacks
  30. if typing.TYPE_CHECKING:
  31. from collections.abc import Iterable, Iterator
  32. T = typing.TypeVar('T')
  33. def undecimalize(value, cr):
  34. if value is None:
  35. return None
  36. return float(value)
# Register a typecaster so PostgreSQL NUMERIC values (type OID 1700) are
# decoded as Python floats instead of Decimal, both as scalars and as
# arrays (array type OID 1231).
DECIMAL_TO_FLOAT_TYPE = psycopg2.extensions.new_type((1700,), 'float', undecimalize)
psycopg2.extensions.register_type(DECIMAL_TO_FLOAT_TYPE)
psycopg2.extensions.register_type(psycopg2.extensions.new_array_type((1231,), 'float[]', DECIMAL_TO_FLOAT_TYPE))

_logger = logging.getLogger(__name__)
# dedicated child logger for connection pool chatter
_logger_conn = _logger.getChild("connection")

real_time = time.time.__call__  # ensure we have a non patched time for query times when using freezegun

# Patterns used by categorize_query() to extract the main table name of a
# SELECT (FROM ...) or INSERT (INTO ...) statement for SQL statistics.
re_from = re.compile(r'\bfrom\s+"?([a-zA-Z_0-9]+)\b', re.IGNORECASE)
re_into = re.compile(r'\binto\s+"?([a-zA-Z_0-9]+)\b', re.IGNORECASE)
  45. def categorize_query(decoded_query):
  46. res_into = re_into.search(decoded_query)
  47. # prioritize `insert` over `select` so `select` subqueries are not
  48. # considered when inside a `insert`
  49. if res_into:
  50. return 'into', res_into.group(1)
  51. res_from = re_from.search(decoded_query)
  52. if res_from:
  53. return 'from', res_from.group(1)
  54. return 'other', None
# process-wide counter of executed SQL queries (incremented by Cursor.execute)
sql_counter = 0

# pooled connections unused for longer than this (seconds) are closed by
# ConnectionPool.borrow's garbage collection pass
MAX_IDLE_TIMEOUT = 60 * 10
  57. class Savepoint:
  58. """ Reifies an active breakpoint, allows :meth:`BaseCursor.savepoint` users
  59. to internally rollback the savepoint (as many times as they want) without
  60. having to implement their own savepointing, or triggering exceptions.
  61. Should normally be created using :meth:`BaseCursor.savepoint` rather than
  62. directly.
  63. The savepoint will be rolled back on unsuccessful context exits
  64. (exceptions). It will be released ("committed") on successful context exit.
  65. The savepoint object can be wrapped in ``contextlib.closing`` to
  66. unconditionally roll it back.
  67. The savepoint can also safely be explicitly closed during context body. This
  68. will rollback by default.
  69. :param BaseCursor cr: the cursor to execute the `SAVEPOINT` queries on
  70. """
  71. def __init__(self, cr):
  72. self.name = str(uuid.uuid1())
  73. self._cr = cr
  74. self.closed = False
  75. cr.execute('SAVEPOINT "%s"' % self.name)
  76. def __enter__(self):
  77. return self
  78. def __exit__(self, exc_type, exc_val, exc_tb):
  79. self.close(rollback=exc_type is not None)
  80. def close(self, *, rollback=True):
  81. if not self.closed:
  82. self._close(rollback)
  83. def rollback(self):
  84. self._cr.execute('ROLLBACK TO SAVEPOINT "%s"' % self.name)
  85. def _close(self, rollback):
  86. if rollback:
  87. self.rollback()
  88. self._cr.execute('RELEASE SAVEPOINT "%s"' % self.name)
  89. self.closed = True
class _FlushingSavepoint(Savepoint):
    """Savepoint that keeps pending ORM changes in sync: it flushes them
    before creating the savepoint and on release, and clears them on
    rollback, so the savepoint boundary matches the database state."""

    def __init__(self, cr):
        # flush first so pending changes are inside the savepoint's scope
        cr.flush()
        super().__init__(cr)

    def rollback(self):
        # discard pending (unflushed) changes before rolling the DB back
        self._cr.clear()
        super().rollback()

    def _close(self, rollback):
        try:
            if not rollback:
                self._cr.flush()
        except Exception:
            # the flush failed: roll the savepoint back instead of releasing
            # it as-is, then propagate the error
            rollback = True
            raise
        finally:
            super()._close(rollback)
class BaseCursor:
    """ Base class for cursors that manage pre/post commit hooks. """

    def __init__(self):
        # hook containers, run/cleared around commit and rollback
        self.precommit = Callbacks()
        self.postcommit = Callbacks()
        self.prerollback = Callbacks()
        self.postrollback = Callbacks()
        # By default a cursor has no transaction object. A transaction object
        # for managing environments is instantiated by registry.cursor(). It
        # is not done here in order to avoid cyclic module dependencies.
        self.transaction = None

    def flush(self):
        """ Flush the current transaction, and run precommit hooks. """
        if self.transaction is not None:
            self.transaction.flush()
        self.precommit.run()

    def clear(self):
        """ Clear the current transaction, and clear precommit hooks. """
        if self.transaction is not None:
            self.transaction.clear()
        self.precommit.clear()

    def reset(self):
        """ Reset the current transaction (this invalidates more than clear()).
        This method should be called only right after commit() or rollback().
        """
        if self.transaction is not None:
            self.transaction.reset()

    def savepoint(self, flush=True) -> Savepoint:
        """context manager entering in a new savepoint

        With ``flush`` (the default), will automatically run (or clear) the
        relevant hooks.
        """
        if flush:
            return _FlushingSavepoint(self)
        else:
            return Savepoint(self)

    def __enter__(self):
        """ Using the cursor as a contextmanager automatically commits and
        closes it::

            with cr:
                cr.execute(...)
            # cr is committed if no failure occurred
            # cr is closed in any case
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            if exc_type is None:
                self.commit()
        finally:
            # always close, even if commit (or the body) raised
            self.close()
class Cursor(BaseCursor):
    """Represents an open transaction to the PostgreSQL DB backend,
    acting as a lightweight wrapper around psycopg2's
    ``cursor`` objects.

    ``Cursor`` is the object behind the ``cr`` variable used all
    over the OpenERP code.

    .. rubric:: Transaction Isolation

    One very important property of database transactions is the
    level of isolation between concurrent transactions.
    The SQL standard defines four levels of transaction isolation,
    ranging from the most strict *Serializable* level, to the least
    strict *Read Uncommitted* level. These levels are defined in
    terms of the phenomena that must not occur between concurrent
    transactions, such as *dirty read*, etc.

    In the context of a generic business data management software
    such as OpenERP, we need the best guarantees that no data
    corruption can ever be cause by simply running multiple
    transactions in parallel. Therefore, the preferred level would
    be the *serializable* level, which ensures that a set of
    transactions is guaranteed to produce the same effect as
    running them one at a time in some order.

    However, most database management systems implement a limited
    serializable isolation in the form of
    `snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
    providing most of the same advantages as True Serializability,
    with a fraction of the performance cost.

    With PostgreSQL up to version 9.0, this snapshot isolation was
    the implementation of both the ``REPEATABLE READ`` and
    ``SERIALIZABLE`` levels of the SQL standard.

    As of PostgreSQL 9.1, the previous snapshot isolation implementation
    was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
    level was introduced, providing some additional heuristics to
    detect a concurrent update by parallel transactions, and forcing
    one of them to rollback.

    OpenERP implements its own level of locking protection
    for transactions that are highly likely to provoke concurrent
    updates, such as stock reservations or document sequences updates.
    Therefore we mostly care about the properties of snapshot isolation,
    but we don't really need additional heuristics to trigger transaction
    rollbacks, as we are taking care of triggering instant rollbacks
    ourselves when it matters (and we can save the additional performance
    hit of these heuristics).

    As a result of the above, we have selected ``REPEATABLE READ`` as
    the default transaction isolation level for OpenERP cursors, as
    it will be mapped to the desired ``snapshot isolation`` level for
    all supported PostgreSQL version (>10).

    .. attribute:: cache

        Cache dictionary with a "request" (-ish) lifecycle, only lives as
        long as the cursor itself does and proactively cleared when the
        cursor is closed.

        This cache should *only* be used to store repeatable reads as it
        ignores rollbacks and savepoints, it should not be used to store
        *any* data which may be modified during the life of the cursor.
    """
    IN_MAX = 1000  # decent limit on size of IN queries - guideline = Oracle limit

    def __init__(self, pool, dbname, dsn):
        super().__init__()
        # per-table timing stats, filled by execute() at DEBUG level
        self.sql_from_log = {}
        self.sql_into_log = {}
        # default log level determined at cursor creation, could be
        # overridden later for debugging purposes
        self.sql_log_count = 0
        # avoid the call of close() (by __del__) if an exception
        # is raised by any of the following initializations
        self._closed = True
        self.__pool = pool
        self.dbname = dbname
        self._cnx = pool.borrow(dsn)
        self._obj = self._cnx.cursor()
        if _logger.isEnabledFor(logging.DEBUG):
            # remember the creation site so __del__ can report leaks usefully
            self.__caller = frame_codeinfo(currentframe(), 2)
        else:
            self.__caller = False
        self._closed = False  # real initialization value
        # See the docstring of this class.
        self.connection.set_isolation_level(ISOLATION_LEVEL_REPEATABLE_READ)
        self.connection.set_session(readonly=pool.readonly)
        self.cache = {}
        # memoized transaction timestamp, see now()
        self._now = None

    def __build_dict(self, row):
        # map a raw result row to {column_name: value}
        return {d.name: row[i] for i, d in enumerate(self._obj.description)}

    def dictfetchone(self):
        """Fetch the next row as a dict, or None when exhausted."""
        row = self._obj.fetchone()
        return row and self.__build_dict(row)

    def dictfetchmany(self, size):
        """Fetch up to ``size`` rows, each as a dict."""
        return [self.__build_dict(row) for row in self._obj.fetchmany(size)]

    def dictfetchall(self):
        """Fetch all remaining rows, each as a dict."""
        return [self.__build_dict(row) for row in self._obj.fetchall()]

    def __del__(self):
        if not self._closed and not self._cnx.closed:
            # Oops. 'self' has not been closed explicitly.
            # The cursor will be deleted by the garbage collector,
            # but the database connection is not put back into the connection
            # pool, preventing some operation on the database like dropping it.
            # This can also lead to a server overload.
            msg = "Cursor not closed explicitly\n"
            if self.__caller:
                msg += "Cursor was created at %s:%s" % self.__caller
            else:
                msg += "Please enable sql debugging to trace the caller."
            _logger.warning(msg)
            self._close(True)

    def _format(self, query, params=None):
        # render the query with its parameters interpolated, for logging only
        encoding = psycopg2.extensions.encodings[self.connection.encoding]
        return self.mogrify(query, params).decode(encoding, 'replace')

    def mogrify(self, query, params=None):
        """Return the query as bytes after argument binding (psycopg2 mogrify)."""
        if isinstance(query, SQL):
            assert params is None, "Unexpected parameters for SQL query object"
            query, params = query.code, query.params
        return self._obj.mogrify(query, params)

    def execute(self, query, params=None, log_exceptions=True):
        """Execute ``query`` (str or SQL object), collecting timing statistics.

        :param query: SQL text or an :class:`SQL` object carrying its params
        :param params: bind parameters (tuple, list or dict); must be None
            when ``query`` is an :class:`SQL` object
        :param log_exceptions: when True, failed queries are logged at ERROR
        :raises ValueError: if ``params`` has an unsupported type
        """
        global sql_counter

        if isinstance(query, SQL):
            assert params is None, "Unexpected parameters for SQL query object"
            query, params = query.code, query.params

        if params and not isinstance(params, (tuple, list, dict)):
            # psycopg2's TypeError is not clear if you mess up the params
            raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))

        start = real_time()
        try:
            params = params or None
            res = self._obj.execute(query, params)
        except Exception as e:
            if log_exceptions:
                _logger.error("bad query: %s\nERROR: %s", self._obj.query or query, e)
            raise
        finally:
            # timing is recorded even when the query fails
            delay = real_time() - start
            if _logger.isEnabledFor(logging.DEBUG):
                _logger.debug("[%.3f ms] query: %s", 1000 * delay, self._format(query, params))

        # simple query count is always computed
        self.sql_log_count += 1
        sql_counter += 1

        current_thread = threading.current_thread()
        if hasattr(current_thread, 'query_count'):
            current_thread.query_count += 1
            current_thread.query_time += delay

        # optional hooks for performance and tracing analysis
        for hook in getattr(current_thread, 'query_hooks', ()):
            hook(self, query, params, start, delay)

        # advanced stats
        if _logger.isEnabledFor(logging.DEBUG):
            query_type, table = categorize_query(self._obj.query.decode())
            log_target = None
            if query_type == 'into':
                log_target = self.sql_into_log
            elif query_type == 'from':
                log_target = self.sql_from_log
            if log_target:
                # [call count, cumulative time in microseconds] per table
                stats = log_target.setdefault(table, [0, 0])
                stats[0] += 1
                stats[1] += delay * 1E6
        return res

    def execute_values(self, query, argslist, template=None, page_size=100, fetch=False):
        """
        A proxy for psycopg2.extras.execute_values which can log all queries like execute.
        But this method cannot set log_exceptions=False like execute
        """
        # Odoo Cursor only proxies all methods of psycopg2 Cursor. This is a patch for problems caused by passing
        # self instead of self._obj to the first parameter of psycopg2.extras.execute_values.
        if isinstance(query, Composable):
            query = query.as_string(self._obj)
        return psycopg2.extras.execute_values(self, query, argslist, template=template, page_size=page_size, fetch=fetch)

    def split_for_in_conditions(self, ids: Iterable[T], size: int = 0) -> Iterator[tuple[T, ...]]:
        """Split a list of identifiers into one or more smaller tuples
        safe for IN conditions, after uniquifying them."""
        return tools.misc.split_every(size or self.IN_MAX, ids)

    def print_log(self):
        """Dump the accumulated per-table SQL statistics at DEBUG level and
        reset them.  No-op unless DEBUG logging is enabled."""
        global sql_counter

        if not _logger.isEnabledFor(logging.DEBUG):
            return

        def process(type):
            # NOTE: `type` and `sum` shadow builtins here (kept as-is)
            sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
            sum = 0
            if sqllogs[type]:
                sqllogitems = sqllogs[type].items()
                _logger.debug("SQL LOG %s:", type)
                for r in sorted(sqllogitems, key=lambda k: k[1]):
                    delay = timedelta(microseconds=r[1][1])
                    _logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
                    sum += r[1][1]
                sqllogs[type].clear()
            sum = timedelta(microseconds=sum)
            _logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter)
            sqllogs[type].clear()
        process('from')
        process('into')
        self.sql_log_count = 0

    @contextmanager
    def _enable_logging(self):
        """ Forcefully enables logging for this cursor, restores it afterwards.

        Updates the logger in-place, so not thread-safe.
        """
        level = _logger.level
        _logger.setLevel(logging.DEBUG)
        try:
            yield
        finally:
            _logger.setLevel(level)

    def close(self):
        if not self.closed:
            return self._close(False)

    def _close(self, leak=False):
        # ``leak=True`` marks the underlying connection as leaked so the pool
        # can reclaim it later (used by __del__ for unclosed cursors)
        if not self._obj:
            return

        del self.cache

        # advanced stats only at logging.DEBUG level
        self.print_log()

        self._obj.close()

        # This force the cursor to be freed, and thus, available again. It is
        # important because otherwise we can overload the server very easily
        # because of a cursor shortage (because cursors are not garbage
        # collected as fast as they should). The problem is probably due in
        # part because browse records keep a reference to the cursor.
        del self._obj

        # Clean the underlying connection, and run rollback hooks.
        self.rollback()

        self._closed = True

        if leak:
            self._cnx.leaked = True
        else:
            # never keep connections to template/maintenance databases in the
            # pool, so those databases can be dropped or copied at any time
            chosen_template = tools.config['db_template']
            keep_in_pool = self.dbname not in ('template0', 'template1', 'postgres', chosen_template)
            self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)

    def commit(self):
        """ Perform an SQL `COMMIT` """
        self.flush()
        result = self._cnx.commit()
        self.clear()
        self._now = None
        self.prerollback.clear()
        self.postrollback.clear()
        self.postcommit.run()
        return result

    def rollback(self):
        """ Perform an SQL `ROLLBACK` """
        self.clear()
        self.postcommit.clear()
        self.prerollback.run()
        result = self._cnx.rollback()
        self._now = None
        self.postrollback.run()
        return result

    def __getattr__(self, name):
        # delegate unknown attributes to the underlying psycopg2 cursor
        if self._closed and name == '_obj':
            raise psycopg2.InterfaceError("Cursor already closed")
        return getattr(self._obj, name)

    @property
    def closed(self):
        return self._closed or self._cnx.closed

    @property
    def readonly(self):
        return bool(self._cnx.readonly)

    def now(self):
        """ Return the transaction's timestamp ``NOW() AT TIME ZONE 'UTC'``. """
        if self._now is None:
            self.execute("SELECT (now() AT TIME ZONE 'UTC')")
            self._now = self.fetchone()[0]
        return self._now
class TestCursor(BaseCursor):
    """ A pseudo-cursor to be used for tests, on top of a real cursor. It keeps
    the transaction open across requests, and simulates committing, rolling
    back, and closing:

        +------------------------+---------------------------------------------------+
        | test cursor            | queries on actual cursor                          |
        +========================+===================================================+
        |``cr = TestCursor(...)``|                                                   |
        +------------------------+---------------------------------------------------+
        | ``cr.execute(query)``  | SAVEPOINT test_cursor_N (if not savepoint)        |
        |                        | query                                             |
        +------------------------+---------------------------------------------------+
        | ``cr.commit()``        | RELEASE SAVEPOINT test_cursor_N (if savepoint)    |
        +------------------------+---------------------------------------------------+
        | ``cr.rollback()``      | ROLLBACK TO SAVEPOINT test_cursor_N (if savepoint)|
        +------------------------+---------------------------------------------------+
        | ``cr.close()``         | ROLLBACK TO SAVEPOINT test_cursor_N (if savepoint)|
        |                        | RELEASE SAVEPOINT test_cursor_N (if savepoint)    |
        +------------------------+---------------------------------------------------+
    """
    # stack of currently-open test cursors, used to detect nesting issues
    _cursors_stack = []

    def __init__(self, cursor, lock, readonly):
        assert isinstance(cursor, BaseCursor)
        super().__init__()
        self._now = None
        self._closed = False
        self._cursor = cursor
        self.readonly = readonly
        # we use a lock to serialize concurrent requests
        self._lock = lock
        self._lock.acquire()
        last_cursor = self._cursors_stack and self._cursors_stack[-1]
        if last_cursor and last_cursor.readonly and not readonly and last_cursor._savepoint:
            raise Exception('Opening a read/write test cursor from a readonly one')
        self._cursors_stack.append(self)
        # in order to simulate commit and rollback, the cursor maintains a
        # savepoint at its last commit, the savepoint is created lazily
        self._savepoint = None

    def _check_savepoint(self):
        # lazily create the simulated-transaction savepoint before the first
        # query of a simulated transaction
        if not self._savepoint:
            # we use self._cursor._obj for the savepoint to avoid having the
            # savepoint queries in the query counts, profiler, ...
            # Those queries are tests artefacts and should be invisible.
            self._savepoint = Savepoint(self._cursor._obj)
            if self.readonly:
                # this will simulate a readonly connection
                self._cursor._obj.execute('SET TRANSACTION READ ONLY')  # use _obj to avoid impacting query count and profiler.

    def execute(self, *args, **kwargs):
        assert not self._closed, "Cannot use a closed cursor"
        self._check_savepoint()
        return self._cursor.execute(*args, **kwargs)

    def close(self):
        if not self._closed:
            # simulated close = rollback + release of the savepoint
            self.rollback()
            self._closed = True
            if self._savepoint:
                self._savepoint.close(rollback=False)

            tos = self._cursors_stack.pop()
            if tos is not self:
                _logger.warning("Found different un-closed cursor when trying to close %s: %s", self, tos)
            self._lock.release()

    def commit(self):
        """ Perform an SQL `COMMIT` """
        self.flush()
        if self._savepoint:
            # a readonly "commit" must not persist anything: roll back instead
            self._savepoint.close(rollback=self.readonly)
            self._savepoint = None
        self.clear()
        self.prerollback.clear()
        self.postrollback.clear()
        self.postcommit.clear()  # TestCursor ignores post-commit hooks by default

    def rollback(self):
        """ Perform an SQL `ROLLBACK` """
        self.clear()
        self.postcommit.clear()
        self.prerollback.run()
        if self._savepoint:
            self._savepoint.close(rollback=True)
            self._savepoint = None
        self.postrollback.run()

    def __getattr__(self, name):
        # delegate everything else to the real underlying cursor
        return getattr(self._cursor, name)

    def now(self):
        """ Return the transaction's timestamp ``datetime.now()``. """
        if self._now is None:
            self._now = datetime.now()
        return self._now
class PsycoConnection(psycopg2.extensions.connection):
    """Connection subclass used by the pool: disables large-object support
    and hides the password from connection info."""

    def lobject(*args, **kwargs):
        # stubbed out: large objects are not used (note: no explicit `self`,
        # the instance is swallowed by *args)
        pass

    # ConnectionInfo only exists in recent psycopg2 versions
    if hasattr(psycopg2.extensions, 'ConnectionInfo'):
        @property
        def info(self):
            class PsycoConnectionInfo(psycopg2.extensions.ConnectionInfo):
                @property
                def password(self):
                    # mask the password so it cannot leak via logs/repr
                    pass
            return PsycoConnectionInfo(self)
class ConnectionPool(object):
    """ The pool of connections to database(s)

    Keep a set of connections to pg databases open, and reuse them
    to open cursors for all transactions.

    The connections are *not* automatically closed. Only a close_db()
    can trigger that.
    """

    def __init__(self, maxconn=64, readonly=False):
        # each entry is a mutable triple [connection, in_use, last_used_time]
        self._connections = []
        self._maxconn = max(maxconn, 1)
        self._readonly = readonly
        self._lock = threading.Lock()

    def __repr__(self):
        used = len([1 for c, u, _ in self._connections[:] if u])
        count = len(self._connections)
        mode = 'read-only' if self._readonly else 'read/write'
        return f"ConnectionPool({mode};used={used}/count={count}/max={self._maxconn})"

    @property
    def readonly(self):
        return self._readonly

    def _debug(self, msg, *args):
        _logger_conn.debug(('%r ' + msg), self, *args)

    @locked
    def borrow(self, connection_info):
        """
        Borrow a PsycoConnection from the pool. If no connection is available, create a new one
        as long as there are still slots available. Perform some garbage-collection in the pool:
        idle, dead and leaked connections are removed.

        :param dict connection_info: dict of psql connection keywords
        :rtype: PsycoConnection
        """
        # free idle, dead and leaked connections
        for i, (cnx, used, last_used) in tools.reverse_enumerate(self._connections):
            if not used and not cnx.closed and time.time() - last_used > MAX_IDLE_TIMEOUT:
                self._debug('Close connection at index %d: %r', i, cnx.dsn)
                cnx.close()
            if cnx.closed:
                self._connections.pop(i)
                self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
                continue
            if getattr(cnx, 'leaked', False):
                delattr(cnx, 'leaked')
                self._connections[i][1] = False
                _logger.info('%r: Free leaked connection to %r', self, cnx.dsn)

        # try to reuse an idle connection matching the requested dsn
        for i, (cnx, used, _) in enumerate(self._connections):
            if not used and self._dsn_equals(cnx.dsn, connection_info):
                try:
                    cnx.reset()
                except psycopg2.OperationalError:
                    self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
                    # psycopg2 2.4.4 and earlier do not allow closing a closed connection
                    if not cnx.closed:
                        cnx.close()
                    continue
                self._connections[i][1] = True
                self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)
                return cnx

        if len(self._connections) >= self._maxconn:
            # try to remove the oldest connection not used
            for i, (cnx, used, _) in enumerate(self._connections):
                if not used:
                    self._connections.pop(i)
                    if not cnx.closed:
                        cnx.close()
                    self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
                    break
            else:
                # note: this code is called only if the for loop has completed (no break)
                raise PoolError('The Connection Pool Is Full')

        try:
            result = psycopg2.connect(
                connection_factory=PsycoConnection,
                **connection_info)
        except psycopg2.Error:
            _logger.info('Connection to the database failed')
            raise
        self._connections.append([result, True, 0])
        self._debug('Create new connection backend PID %d', result.get_backend_pid())
        return result

    @locked
    def give_back(self, connection, keep_in_pool=True):
        self._debug('Give back connection to %r', connection.dsn)
        for i, (cnx, _, _) in enumerate(self._connections):
            if cnx is connection:
                if keep_in_pool:
                    # Release the connection and record the last time used
                    self._connections[i][1] = False
                    self._connections[i][2] = time.time()
                    self._debug('Put connection to %r in pool', cnx.dsn)
                else:
                    self._connections.pop(i)
                    self._debug('Forgot connection to %r', cnx.dsn)
                    cnx.close()
                break
        else:
            raise PoolError('This connection does not belong to the pool')

    @locked
    def close_all(self, dsn=None):
        """Close all connections (matching ``dsn`` when given) and drop them
        from the pool."""
        count = 0
        last = None
        for i, (cnx, _, _) in tools.reverse_enumerate(self._connections):
            if dsn is None or self._dsn_equals(cnx.dsn, dsn):
                cnx.close()
                last = self._connections.pop(i)[0]
                count += 1
        if count:
            _logger.info('%r: Closed %d connections %s', self, count,
                         (dsn and last and 'to %r' % last.dsn) or '')

    def _dsn_equals(self, dsn1, dsn2):
        # compare two dsns (string or dict form), ignoring the password and
        # normalizing the dbname/database key alias
        alias_keys = {'dbname': 'database'}
        ignore_keys = ['password']
        dsn1, dsn2 = ({
            alias_keys.get(key, key): str(value)
            for key, value in (psycopg2.extensions.parse_dsn(dsn) if isinstance(dsn, str) else dsn).items()
            if key not in ignore_keys
        } for dsn in (dsn1, dsn2))
        return dsn1 == dsn2
  631. class Connection(object):
  632. """ A lightweight instance of a connection to postgres
  633. """
  634. def __init__(self, pool, dbname, dsn):
  635. self.__dbname = dbname
  636. self.__dsn = dsn
  637. self.__pool = pool
  638. @property
  639. def dsn(self):
  640. dsn = dict(self.__dsn)
  641. dsn.pop('password', None)
  642. return dsn
  643. @property
  644. def dbname(self):
  645. return self.__dbname
  646. def cursor(self):
  647. _logger.debug('create cursor to %r', self.dsn)
  648. return Cursor(self.__pool, self.__dbname, self.__dsn)
  649. def __bool__(self):
  650. raise NotImplementedError()
  651. def connection_info_for(db_or_uri, readonly=False):
  652. """ parse the given `db_or_uri` and return a 2-tuple (dbname, connection_params)
  653. Connection params are either a dictionary with a single key ``dsn``
  654. containing a connection URI, or a dictionary containing connection
  655. parameter keywords which psycopg2 can build a key/value connection string
  656. (dsn) from
  657. :param str db_or_uri: database name or postgres dsn
  658. :param bool readonly: used to load
  659. the default configuration from ``db_`` or ``db_replica_``.
  660. :rtype: (str, dict)
  661. """
  662. if 'ODOO_PGAPPNAME' in os.environ:
  663. # Using manual string interpolation for security reason and trimming at default NAMEDATALEN=63
  664. app_name = os.environ['ODOO_PGAPPNAME'].replace('{pid}', str(os.getpid()))[0:63]
  665. else:
  666. app_name = "odoo-%d" % os.getpid()
  667. if db_or_uri.startswith(('postgresql://', 'postgres://')):
  668. # extract db from uri
  669. us = urls.url_parse(db_or_uri)
  670. if len(us.path) > 1:
  671. db_name = us.path[1:]
  672. elif us.username:
  673. db_name = us.username
  674. else:
  675. db_name = us.hostname
  676. return db_name, {'dsn': db_or_uri, 'application_name': app_name}
  677. connection_info = {'database': db_or_uri, 'application_name': app_name}
  678. for p in ('host', 'port', 'user', 'password', 'sslmode'):
  679. cfg = tools.config['db_' + p]
  680. if readonly:
  681. cfg = tools.config.get('db_replica_' + p, cfg)
  682. if cfg:
  683. connection_info[p] = cfg
  684. return db_or_uri, connection_info
  685. _Pool = None
  686. _Pool_readonly = None
  687. def db_connect(to, allow_uri=False, readonly=False):
  688. global _Pool, _Pool_readonly # noqa: PLW0603 (global-statement)
  689. maxconn = odoo.evented and tools.config['db_maxconn_gevent'] or tools.config['db_maxconn']
  690. if _Pool is None and not readonly:
  691. _Pool = ConnectionPool(int(maxconn), readonly=False)
  692. if _Pool_readonly is None and readonly:
  693. _Pool_readonly = ConnectionPool(int(maxconn), readonly=True)
  694. db, info = connection_info_for(to, readonly)
  695. if not allow_uri and db != to:
  696. raise ValueError('URI connections not allowed')
  697. return Connection(_Pool_readonly if readonly else _Pool, db, info)
  698. def close_db(db_name):
  699. """ You might want to call odoo.modules.registry.Registry.delete(db_name) along this function."""
  700. if _Pool:
  701. _Pool.close_all(connection_info_for(db_name)[1])
  702. if _Pool_readonly:
  703. _Pool_readonly.close_all(connection_info_for(db_name)[1])
  704. def close_all():
  705. if _Pool:
  706. _Pool.close_all()
  707. if _Pool_readonly:
  708. _Pool_readonly.close_all()
上海开阖软件有限公司 沪ICP备12045867号-1