/*-------------------------------------------------------------------------
 *
 * lwlock.h
 *    Lightweight lock manager
 *
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/lwlock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef LWLOCK_H
#define LWLOCK_H

#ifdef FRONTEND
#error "lwlock.h may not be included from frontend code"
#endif

#include "storage/proclist_types.h"
#include "storage/s_lock.h"
#include "port/atomics.h"

struct PGPROC;

/*
 * Code outside of lwlock.c should not manipulate the contents of this
 * structure directly, but we have to declare it here to allow LWLocks to be
 * incorporated into other data structures.
 */
typedef struct LWLock
{
    uint16      tranche;        /* tranche ID */
    pg_atomic_uint32 state;     /* state of exclusive/nonexclusive lockers */
    proclist_head waiters;      /* list of waiting PGPROCs */
#ifdef LOCK_DEBUG
    pg_atomic_uint32 nwaiters;  /* number of waiters */
    struct PGPROC *owner;       /* last exclusive owner of the lock */
#endif
} LWLock;

/*
 * In most cases, it's desirable to force each tranche of LWLocks to be aligned
 * on a cache line boundary and make the array stride a power of 2.  This saves
 * a few cycles in indexing, but more importantly ensures that individual
 * LWLocks don't cross cache line boundaries.  This reduces cache contention
 * problems, especially on AMD Opterons.  In some cases, it's useful to add
 * even more padding so that each LWLock takes up an entire cache line; this is
 * useful, for example, in the main LWLock array, where the overall number of
 * locks is small but some are heavily contended.
 *
 * When allocating a tranche that contains data other than LWLocks, it is
 * probably best to include a bare LWLock and then pad the resulting structure
 * as necessary for performance.  For an array that contains only LWLocks,
 * LWLockMinimallyPadded can be used for cases where we just want to ensure
 * that we don't cross cache line boundaries within a single lock, while
 * LWLockPadded can be used for cases where we want each lock to be an entire
 * cache line.
 *
 * An LWLockMinimallyPadded might contain more than the absolute minimum amount
 * of padding required to keep a lock from crossing a cache line boundary,
 * because an unpadded LWLock will normally fit into 16 bytes.  We ignore that
 * possibility when determining the minimal amount of padding.  Older releases
 * had larger LWLocks, so 32 really was the minimum, and packing them in
 * tighter might hurt performance.
 *
 * LWLOCK_MINIMAL_SIZE should be 32 on basically all common platforms, but
 * because pg_atomic_uint32 is more than 4 bytes on some obscure platforms, we
 * allow for the possibility that it might be 64.  Even on those platforms,
 * we probably won't exceed 32 bytes unless LOCK_DEBUG is defined.
 */
#define LWLOCK_PADDED_SIZE  PG_CACHE_LINE_SIZE
#define LWLOCK_MINIMAL_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)

/* LWLock, padded to a full cache line size */
typedef union LWLockPadded
{
    LWLock      lock;
    char        pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

/* LWLock, minimally padded */
typedef union LWLockMinimallyPadded
{
    LWLock      lock;
    char        pad[LWLOCK_MINIMAL_SIZE];
} LWLockMinimallyPadded;
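
/*
 * Illustrative sketch, not part of the original header: following the advice
 * above, a tranche that carries data other than LWLocks would embed a bare
 * LWLock and pad the enclosing structure itself, here to a full cache line,
 * mirroring the LWLockPadded pattern.  The MyCounterSlot name and its fields
 * are invented for illustration only.
 */
#if 0
typedef union MyCounterSlot
{
    struct
    {
        LWLock      lock;       /* protects the counter below */
        uint64      counter;    /* shared data guarded by the lock */
    }           data;
    char        pad[LWLOCK_PADDED_SIZE];    /* pad the slot to a cache line */
} MyCounterSlot;
#endif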
extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
extern const char *const MainLWLockNames[];

/* struct for storing named tranche information */
typedef struct NamedLWLockTranche
{
    int         trancheId;
    char       *trancheName;
} NamedLWLockTranche;

extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
extern PGDLLIMPORT int NamedLWLockTrancheRequests;

/* Names for fixed lwlocks */
#include "storage/lwlocknames.h"

/*
 * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
 * here, but we need them to figure out offsets within MainLWLockArray, and
 * having this file include lock.h or bufmgr.h would be backwards.
 */

/* Number of partitions of the shared buffer mapping hashtable */
#define NUM_BUFFER_PARTITIONS  128

/* Number of partitions the shared lock tables are divided into */
#define LOG2_NUM_LOCK_PARTITIONS  4
#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)

/* Number of partitions the shared predicate lock tables are divided into */
#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)

/* Offsets for various chunks of preallocated lwlocks. */
#define BUFFER_MAPPING_LWLOCK_OFFSET    NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET \
    (BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
    (LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
    (PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
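
/*
 * Illustrative sketch, not part of the original header: callers locate a
 * partition lock by adding a hash-derived index to the chunk's offset within
 * MainLWLockArray, roughly as the buffer manager does for the buffer mapping
 * partitions.  The helper name below is invented, not a real PostgreSQL
 * function.
 */
#if 0
static inline LWLock *
example_buffer_mapping_partition_lock(uint32 hashcode)
{
    return &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET +
                            hashcode % NUM_BUFFER_PARTITIONS].lock;
}
#endif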
typedef enum LWLockMode
{
    LW_EXCLUSIVE,
    LW_SHARED,
    LW_WAIT_UNTIL_FREE          /* A special mode used in PGPROC->lwlockMode,
                                 * when waiting for lock to become free. Not
                                 * to be used as LWLockAcquire argument */
} LWLockMode;

#ifdef LOCK_DEBUG
extern bool Trace_lwlocks;
#endif

extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
extern void LWLockReleaseAll(void);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);
extern Size LWLockShmemSize(void);
extern void CreateLWLocks(void);
extern void InitLWLockAccess(void);
extern const char *GetLWLockIdentifier(uint32 classId, uint16 eventId);

/*
 * Extensions (or core code) can obtain an LWLock by calling
 * RequestNamedLWLockTranche() during postmaster startup.  Subsequently,
 * call GetNamedLWLockTranche() to obtain a pointer to an array containing
 * the number of LWLocks requested.
 */
extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
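
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * extension loaded via shared_preload_libraries would make the request from
 * its _PG_init(), then look the lock up once shared memory exists.  The
 * tranche name "my_extension" and the my_lock variable are invented.
 */
#if 0
void
_PG_init(void)
{
    RequestNamedLWLockTranche("my_extension", 1);
}

static void
my_extension_touch_shared_state(void)
{
    LWLock     *my_lock = &GetNamedLWLockTranche("my_extension")[0].lock;

    LWLockAcquire(my_lock, LW_EXCLUSIVE);
    /* ... update the extension's shared state here ... */
    LWLockRelease(my_lock);
}
#endif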

/*
 * There is another, more flexible method of obtaining lwlocks.  First, call
 * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
 * a shared counter.  Next, each individual process using the tranche should
 * call LWLockRegisterTranche() to associate that tranche ID with a name.
 * Finally, LWLockInitialize should be called just once per lwlock, passing
 * the tranche ID as an argument.
 *
 * It may seem strange that each process using the tranche must register it
 * separately, but dynamic shared memory segments aren't guaranteed to be
 * mapped at the same address in all coordinating backends, so storing the
 * registration in the main shared memory segment wouldn't work for that case.
 */
extern int  LWLockNewTrancheId(void);
extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name);
extern void LWLockInitialize(LWLock *lock, int tranche_id);
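
/*
 * Illustrative sketch, not part of the original header: the three steps
 * above, for an LWLock living in a dynamic shared memory segment.  The
 * tranche name "my_tranche" and the my_shared_state struct are invented
 * for illustration only.
 */
#if 0
typedef struct my_shared_state
{
    LWLock      lock;
    int         tranche_id;
    /* ... other shared fields ... */
} my_shared_state;

static void
my_create(my_shared_state *state)
{
    /* once, in the process that creates the segment */
    state->tranche_id = LWLockNewTrancheId();
    LWLockRegisterTranche(state->tranche_id, "my_tranche");
    LWLockInitialize(&state->lock, state->tranche_id);  /* once per lock */
}

static void
my_attach(my_shared_state *state)
{
    /* every other process attaching to the segment registers the name too */
    LWLockRegisterTranche(state->tranche_id, "my_tranche");
}
#endif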

/*
 * Every tranche ID less than NUM_INDIVIDUAL_LWLOCKS is reserved; also,
 * we reserve additional tranche IDs for builtin tranches not included in
 * the set of individual LWLocks.  A call to LWLockNewTrancheId will never
 * return a value less than LWTRANCHE_FIRST_USER_DEFINED.
 */
typedef enum BuiltinTrancheIds
{
    LWTRANCHE_CLOG_BUFFERS = NUM_INDIVIDUAL_LWLOCKS,
    LWTRANCHE_COMMITTS_BUFFERS,
    LWTRANCHE_SUBTRANS_BUFFERS,
    LWTRANCHE_MXACTOFFSET_BUFFERS,
    LWTRANCHE_MXACTMEMBER_BUFFERS,
    LWTRANCHE_ASYNC_BUFFERS,
    LWTRANCHE_OLDSERXID_BUFFERS,
    LWTRANCHE_WAL_INSERT,
    LWTRANCHE_BUFFER_CONTENT,
    LWTRANCHE_BUFFER_IO_IN_PROGRESS,
    LWTRANCHE_REPLICATION_ORIGIN,
    LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
    LWTRANCHE_PROC,
    LWTRANCHE_BUFFER_MAPPING,
    LWTRANCHE_LOCK_MANAGER,
    LWTRANCHE_PREDICATE_LOCK_MANAGER,
    LWTRANCHE_PARALLEL_HASH_JOIN,
    LWTRANCHE_PARALLEL_QUERY_DSA,
    LWTRANCHE_SESSION_DSA,
    LWTRANCHE_SESSION_RECORD_TABLE,
    LWTRANCHE_SESSION_TYPMOD_TABLE,
    LWTRANCHE_SHARED_TUPLESTORE,
    LWTRANCHE_TBM,
    LWTRANCHE_PARALLEL_APPEND,
    LWTRANCHE_SXACT,
    LWTRANCHE_FIRST_USER_DEFINED
} BuiltinTrancheIds;

/*
 * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
 * to LWLocks.  New code should instead use LWLock *.  However, for the
 * convenience of third-party code, we include the following typedef.
 */
typedef LWLock *LWLockId;

#endif                          /* LWLOCK_H */