MySQL 8.0.40
Source Code Documentation
lock0priv.h
1/*****************************************************************************
2
3Copyright (c) 2007, 2024, Oracle and/or its affiliates.
4
5This program is free software; you can redistribute it and/or modify it under
6the terms of the GNU General Public License, version 2.0, as published by the
7Free Software Foundation.
8
9This program is designed to work with certain software (including
10but not limited to OpenSSL) that is licensed under separate terms,
11as designated in a particular file or component or in included license
12documentation. The authors of MySQL hereby grant you an additional
13permission to link the program and your derivative works with the
14separately licensed software that they have either included with
15the program or referenced in the documentation.
16
17This program is distributed in the hope that it will be useful, but WITHOUT
18ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
19FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
20for more details.
21
22You should have received a copy of the GNU General Public License along with
23this program; if not, write to the Free Software Foundation, Inc.,
2451 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25
26*****************************************************************************/
27
28/** @file include/lock0priv.h
29 Lock module internal structures and methods.
30
31 Created July 12, 2007 Vasil Dimov
32 *******************************************************/
33
34#ifndef lock0priv_h
35#define lock0priv_h
36
37#ifndef LOCK_MODULE_IMPLEMENTATION
38/* If you need to access members of the structures defined in this
39file, please write appropriate functions that retrieve them and put
40those functions in lock/ */
41#error Do not include lock0priv.h outside of the lock/ module
42#endif
43
44#include "dict0types.h"
45#include "hash0hash.h"
46#include "trx0types.h"
47#include "univ.i"
48#include "ut0bitset.h"
49
50#include <scope_guard.h>
51#include <utility>
52
53/** A table lock */
54struct lock_table_t {
55 dict_table_t *table; /*!< database table in dictionary
56 cache */
57  UT_LIST_NODE_T(lock_t)
58 locks; /*!< list of locks on the same
59 table */
60 /** Print the table lock into the given output stream
61 @param[in,out] out the output stream
62 @return the given output stream. */
63 std::ostream &print(std::ostream &out) const;
64};
65
66/** Print the table lock into the given output stream
67@param[in,out] out the output stream
68@return the given output stream. */
69inline std::ostream &lock_table_t::print(std::ostream &out) const {
70 out << "[lock_table_t: name=" << table->name << "]";
71 return (out);
72}
73
74/** The global output operator is overloaded to conveniently
75print the lock_table_t object into the given output stream.
76@param[in,out] out the output stream
77@param[in] lock the table lock
78@return the given output stream */
79inline std::ostream &operator<<(std::ostream &out, const lock_table_t &lock) {
80 return (lock.print(out));
81}
82
83/** Record lock for a page */
84struct lock_rec_t {
85 /** The id of the page on which records referenced by this lock's bitmap are
86 located. */
87  page_id_t page_id;
88 /** number of bits in the lock bitmap;
89 Must be divisible by 8.
90 NOTE: the lock bitmap is placed immediately after the lock struct */
91 uint32_t n_bits;
92
93 /** Print the record lock into the given output stream
94 @param[in,out] out the output stream
95 @return the given output stream. */
96 std::ostream &print(std::ostream &out) const;
97};
98
99/** Print the record lock into the given output stream
100@param[in,out] out the output stream
101@return the given output stream. */
102inline std::ostream &lock_rec_t::print(std::ostream &out) const {
103 return out << "[lock_rec_t: page_id=" << page_id << ", n_bits=" << n_bits
104 << "]";
105}
106
107inline std::ostream &operator<<(std::ostream &out, const lock_rec_t &lock) {
108 return (lock.print(out));
109}
110
111/**
112Checks if the `mode` is LOCK_S or LOCK_X (possibly ORed with LOCK_WAIT or
113LOCK_REC) which means the lock is a
114Next Key Lock, a.k.a. LOCK_ORDINARY, as opposed to Predicate Lock,
115GAP lock, Insert Intention or Record Lock.
116@param mode A mode and flags, of a lock.
117@return true iff the only bits set in `mode` are LOCK_S or LOCK_X and optionally
118LOCK_WAIT or LOCK_REC */
119static inline bool lock_mode_is_next_key_lock(ulint mode) {
120 static_assert(LOCK_ORDINARY == 0, "LOCK_ORDINARY must be 0 (no flags)");
121 ut_ad((mode & LOCK_TABLE) == 0);
122 mode &= ~(LOCK_WAIT | LOCK_REC);
123 ut_ad((mode & LOCK_WAIT) == 0);
124 ut_ad((mode & LOCK_TYPE_MASK) == 0);
125  ut_ad(((mode & ~(LOCK_MODE_MASK)) == LOCK_ORDINARY) ==
126 (mode == LOCK_S || mode == LOCK_X));
127 return (mode & ~(LOCK_MODE_MASK)) == LOCK_ORDINARY;
128}
129
130/** Gets the nth bit of a record lock.
131@param[in] lock record lock
132@param[in] i index of the bit
133@return true if the bit is set; if i == ULINT_UNDEFINED, returns false */
134static inline bool lock_rec_get_nth_bit(const lock_t *lock, ulint i);
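The bitmap that follows a lock_t is indexed by heap number: bit i lives in byte i / 8, at position i % 8 within that byte. A minimal standalone sketch of that test follows; nth_bit_is_set is a hypothetical helper written only to make the arithmetic concrete, not the lock0priv.ic implementation of lock_rec_get_nth_bit().

#include <cstddef>
#include <cstdint>

// Illustrative helper only: bit i is stored in byte i / 8, at position i % 8.
// Out-of-range indexes report "not locked", mirroring the ULINT_UNDEFINED
// behaviour documented above.
static bool nth_bit_is_set(const uint8_t *bitmap, size_t n_bits, size_t i) {
  if (i >= n_bits) {
    return false;
  }
  return ((bitmap[i / 8] >> (i % 8)) & 1) != 0;
}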
135
136/** Lock struct; protected by lock_sys latches */
137struct alignas(8 /* For efficient Bitmap::find_set */) lock_t {
138 /** transaction owning the lock */
139  trx_t *trx;
140
141 /** list of the locks of the transaction */
142  UT_LIST_NODE_T(lock_t) trx_locks;
143
144 /** Index for a record lock */
145  dict_index_t *index;
146
147 /** Hash chain node for a record lock. The link node in a singly
148 linked list, used by the hash table. */
149  lock_t *hash;
150
151 union {
152 /** Table lock */
153    lock_table_t tab_lock;
154
155 /** Record lock */
156    lock_rec_t rec_lock;
157 };
158
159#ifdef HAVE_PSI_THREAD_INTERFACE
160#ifdef HAVE_PSI_DATA_LOCK_INTERFACE
161 /** Performance schema thread that created the lock. */
162  ulonglong m_psi_internal_thread_id;
163
164 /** Performance schema event that created the lock. */
165  ulonglong m_psi_event_id;
166#endif /* HAVE_PSI_DATA_LOCK_INTERFACE */
167#endif /* HAVE_PSI_THREAD_INTERFACE */
168
169 /** The lock type and mode bit flags.
170 LOCK_GAP or LOCK_REC_NOT_GAP, LOCK_INSERT_INTENTION, wait flag, ORed */
171 uint32_t type_mode;
172
173#if defined(UNIV_DEBUG)
174 /** Timestamp when it was created. */
175 uint64_t m_seq;
176#endif /* UNIV_DEBUG */
177
178 /** Unlock the GAP Lock part of this Next Key Lock */
179  void unlock_gap_lock() {
180 ut_ad(!is_gap());
181 ut_ad(!is_insert_intention());
182 ut_ad(is_next_key_lock());
183
184 type_mode |= LOCK_REC_NOT_GAP;
185 }
186
187 /** Determine if the lock object is a record lock.
188 @return true if record lock, false otherwise. */
189 bool is_record_lock() const { return (type() == LOCK_REC); }
190
191 /** Determine if it is predicate lock.
192 @return true if predicate lock, false otherwise. */
193 bool is_predicate() const {
194 return (type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
195 }
196
197 /** @return true if the lock wait flag is set */
198 bool is_waiting() const { return (type_mode & LOCK_WAIT); }
199
200 /** @return true if the gap lock bit is set */
201 bool is_gap() const { return (type_mode & LOCK_GAP); }
202
203 /** @return true if the not gap lock bit is set */
204 bool is_record_not_gap() const { return (type_mode & LOCK_REC_NOT_GAP); }
205
206 /** @return true iff the lock is a Next Key Lock */
207 bool is_next_key_lock() const {
208 return is_record_lock() && lock_mode_is_next_key_lock(type_mode);
209 }
210
211 /** @return true if the insert intention bit is set */
212 bool is_insert_intention() const {
213 return (type_mode & LOCK_INSERT_INTENTION);
214 }
215
216 /** @return true iff this lock is (at least) on supremum pseudo-record */
217 bool includes_supremum() const {
218    return lock_rec_get_nth_bit(this, PAGE_HEAP_NO_SUPREMUM);
219 }
220
221 /** @return the lock mode */
222 uint32_t type() const { return (type_mode & LOCK_TYPE_MASK); }
223
224 /** @return the precise lock mode */
225 lock_mode mode() const {
226 return (static_cast<lock_mode>(type_mode & LOCK_MODE_MASK));
227 }
228
229 /** Get lock hash table
230 @return lock hash table */
231 Locks_hashtable &hash_table() const { return lock_hash_get(type_mode); }
232
233 /** @return the transaction's query thread state. */
234 trx_que_t trx_que_state() const { return (trx->lock.que_state); }
235
236 /** Print the lock object into the given output stream.
237 @param[in,out] out the output stream
238 @return the given output stream. */
239 std::ostream &print(std::ostream &out) const;
240
241 /** Convert the member 'type_mode' into a human readable string.
242 @return human readable string */
243 std::string type_mode_string() const;
244
245  /** @return the string/text representation of the lock type. */
246 const char *type_string() const {
247 switch (type_mode & LOCK_TYPE_MASK) {
248 case LOCK_REC:
249 return ("LOCK_REC");
250 case LOCK_TABLE:
251 return ("LOCK_TABLE");
252 default:
253 ut_error;
254 }
255 }
256 /** @overload */
257  Bitset<const byte> bitset() const {
258 ut_ad(is_record_lock());
259 /* On a 32-bit system alignof(uint64_t) might be 4. Still, the
260 Bitmap::find_set goes into slow path if address is not a multiple of 8. */
261 static_assert(8 <= alignof(lock_t),
262 "lock_t and thus the bitmap after lock_t should be aligned "
263 "for efficient 64-bit access");
264 const byte *bitmap = (const byte *)&this[1];
265 /* static_assert verified the theory, but the actual allocation algorithm
266 used could assign a wrong address in practice */
267 ut_ad(reinterpret_cast<uintptr_t>(bitmap) % 8 == 0);
268 ut_ad(rec_lock.n_bits % 8 == 0);
269 return {bitmap, rec_lock.n_bits / 8};
270 }
271 /** Gets access to the LOCK_REC's bitmap, which indicates heap_no-s, which are
272 the subject of this lock request. This should be used directly only in the
273 lock-sys code. Use lock_rec_bitmap_reset(), lock_rec_reset_nth_bit(),
274 lock_rec_set_nth_bit(), and lock_rec_get_nth_bit() wrappers instead. In
275 particular this bitset might be shorter than actual number of heap_no-s on the
276 page! */
277  Bitset<byte> bitset() {
278 auto immutable = const_cast<const ib_lock_t *>(this)->bitset();
279 return {const_cast<byte *>(immutable.data()), immutable.size_bytes()};
280 }
281};
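The accessors above are all simple masks over type_mode: the low bits hold the lock_mode, LOCK_TYPE_MASK selects LOCK_TABLE or LOCK_REC, and the remaining bits are flags such as LOCK_WAIT and LOCK_GAP. A standalone sketch of that decomposition, using stand-in constants whose values mirror lock0lock.h but are repeated here only for illustration:

#include <cstdint>
#include <iostream>

// Stand-in constants for illustration; the authoritative values are the
// constexpr definitions in lock0lock.h.
constexpr uint32_t LOCK_MODE_MASK = 0xF;   // low nibble: IS, IX, S, X, AUTO_INC
constexpr uint32_t LOCK_TYPE_MASK = 0xF0;  // LOCK_TABLE or LOCK_REC
constexpr uint32_t LOCK_X = 3;
constexpr uint32_t LOCK_REC = 32;
constexpr uint32_t LOCK_WAIT = 256;
constexpr uint32_t LOCK_GAP = 512;

int main() {
  // A waiting gap X-lock on a record, as lock_t::type_mode would encode it.
  const uint32_t type_mode = LOCK_X | LOCK_REC | LOCK_WAIT | LOCK_GAP;

  std::cout << "mode    = " << (type_mode & LOCK_MODE_MASK) << '\n';  // 3 = X
  std::cout << "type    = " << (type_mode & LOCK_TYPE_MASK) << '\n';  // 32 = REC
  std::cout << "waiting = " << ((type_mode & LOCK_WAIT) != 0) << '\n';
  std::cout << "gap     = " << ((type_mode & LOCK_GAP) != 0) << '\n';
  return 0;
}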
282
284 /** Functor for accessing the embedded node within a table lock. */
285 static const ut_list_node<lock_t> &get_node(const lock_t &lock) {
286 return lock.tab_lock.locks;
287 }
288};
289
291
292/** Convert the member 'type_mode' into a human readable string.
293@return human readable string */
294inline std::string lock_t::type_mode_string() const {
295  ut::ostringstream sout;
296 sout << type_string();
297 sout << " | " << lock_mode_string(mode());
298
299 if (is_record_not_gap()) {
300 sout << " | LOCK_REC_NOT_GAP";
301 }
302
303 if (is_waiting()) {
304 sout << " | LOCK_WAIT";
305 }
306
307 if (is_gap()) {
308 sout << " | LOCK_GAP";
309 }
310
311 if (is_insert_intention()) {
312 sout << " | LOCK_INSERT_INTENTION";
313 }
314 return (sout.str());
315}
316
317inline std::ostream &lock_t::print(std::ostream &out) const {
318 out << "[lock_t: type_mode=" << type_mode << "(" << type_mode_string() << ")";
319
320 if (is_record_lock()) {
321 out << rec_lock;
322 } else {
323 out << tab_lock;
324 }
325
326 out << "]";
327 return (out);
328}
329
330inline std::ostream &operator<<(std::ostream &out, const lock_t &lock) {
331 return (lock.print(out));
332}
333
334#ifdef UNIV_DEBUG
335extern bool lock_print_waits;
336#endif /* UNIV_DEBUG */
337
338/* Safety margin when creating a new record lock: this many extra records
339can be inserted to the page without need to create a lock with a bigger
340bitmap */
341
342static const ulint LOCK_PAGE_BITMAP_MARGIN = 64;
343
344/* An explicit record lock affects both the record and the gap before it.
345An implicit x-lock does not affect the gap, it only locks the index
346record from read or update.
347
348If a transaction has modified or inserted an index record, then
349it owns an implicit x-lock on the record. On a secondary index record,
350a transaction has an implicit x-lock also if it has modified the
351clustered index record, the max trx id of the page where the secondary
352index record resides is >= trx id of the transaction (or database recovery
353is running), and there are no explicit non-gap lock requests on the
354secondary index record.
355
356This complicated definition for a secondary index comes from the
357implementation: we want to be able to determine if a secondary index
358record has an implicit x-lock, just by looking at the present clustered
359index record, not at the historical versions of the record. The
360complicated definition can be explained to the user so that there is
361nondeterminism in the access path when a query is answered: we may,
362or may not, access the clustered index record and thus may, or may not,
363bump into an x-lock set there.
364
365Different transactions can have conflicting locks set on the gap at the
366same time. The locks on the gap are purely inhibitive: an insert cannot
367be made, or a select cursor may have to wait if a different transaction
368has a conflicting lock on the gap. An x-lock on the gap does not give
369the right to insert into the gap.
370
371An explicit lock can be placed on a user record or the supremum record of
372a page. The locks on the supremum record are always thought to be of the gap
373type, though the gap bit is not set. When we perform an update of a record
374where the size of the record changes, we may temporarily store its explicit
375locks on the infimum record of the page, though the infimum otherwise never
376carries locks.
377
378A waiting record lock can also be of the gap type. A waiting lock request
379can be granted when there is no conflicting mode lock request by another
380transaction ahead of it in the explicit lock queue.
381
382In version 4.0.5 we added yet another explicit lock type: LOCK_REC_NOT_GAP.
383It only locks the record it is placed on, not the gap before the record.
384This lock type is necessary to emulate an Oracle-like READ COMMITTED isolation
385level.
386
387-------------------------------------------------------------------------
388RULE 1: If there is an implicit x-lock on a record, and there are non-gap
389-------
390lock requests waiting in the queue, then the transaction holding the implicit
391x-lock also has an explicit non-gap record x-lock. Therefore, as locks are
392released, we can grant locks to waiting lock requests purely by looking at
393the explicit lock requests in the queue.
394
395RULE 3: Different transactions cannot have conflicting granted non-gap locks
396-------
397on a record at the same time. However, they can have conflicting granted gap
398locks.
399RULE 4: If there is a waiting lock request in a queue, no lock request,
400-------
401gap or not, can be inserted ahead of it in the queue. In record deletes
402and page splits new gap type locks can be created by the database manager
403for a transaction, and without rule 4, the waits-for graph of transactions
404might become cyclic without the database noticing it, as the deadlock check
405is only performed when a transaction itself requests a lock!
406-------------------------------------------------------------------------
407
408An insert is allowed to a gap if there are no explicit lock requests by
409other transactions on the next record. It does not matter if these lock
410requests are granted or waiting, gap bit set or not, with the exception
411that a gap type request set by another transaction to wait for
412its turn to do an insert is ignored. On the other hand, an
413implicit x-lock by another transaction does not prevent an insert, which
414allows for more concurrency when using an Oracle-style sequence number
415generator for the primary key with many transactions doing inserts
416concurrently.
417
418A modify of a record is allowed if the transaction has an x-lock on the
419record, or if other transactions do not have any non-gap lock requests on the
420record.
421
422A read of a single user record with a cursor is allowed if the transaction
423has a non-gap explicit, or an implicit lock on the record, or if the other
424transactions have no x-lock requests on the record. At a page supremum a
425read is always allowed.
426
427In summary, an implicit lock is seen as a granted x-lock only on the
428record, not on the gap. An explicit lock with no gap bit set is a lock
429both on the record and the gap. If the gap bit is set, the lock is only
430on the gap. Different transactions cannot own conflicting locks on the
431record at the same time, but they may own conflicting locks on the gap.
432Granted locks on a record give an access right to the record, but gap type
433locks just inhibit operations.
434
435NOTE: Finding out if some transaction has an implicit x-lock on a secondary
436index record can be cumbersome. We may have to look at previous versions of
437the corresponding clustered index record to find out if a delete marked
438secondary index record was delete marked by an active transaction, not by
439a committed one.
440
441FACT A: If a transaction has inserted a row, it can delete it any time
442without need to wait for locks.
443
444PROOF: The transaction has an implicit x-lock on every index record inserted
445for the row, and can thus modify each record without the need to wait. Q.E.D.
446
447FACT B: If a transaction has read some result set with a cursor, it can read
448it again, and retrieves the same result set, if it has not modified the
449result set in the meantime. Hence, there is no phantom problem. If the
450biggest record, in the alphabetical order, touched by the cursor is removed,
451a lock wait may occur, otherwise not.
452
453PROOF: When a read cursor proceeds, it sets an s-lock on each user record
454it passes, and a gap type s-lock on each page supremum. The cursor must
455wait until it has these locks granted. Then no other transaction can
456have a granted x-lock on any of the user records, and therefore cannot
457modify the user records. Neither can any other transaction insert into
458the gaps which were passed over by the cursor. Page splits and merges,
459and removal of obsolete versions of records do not affect this, because
460when a user record or a page supremum is removed, the next record inherits
461its locks as gap type locks, and therefore blocks inserts to the same gap.
462Also, if a page supremum is inserted, it inherits its locks from the successor
463record. When the cursor is positioned again at the start of the result set,
464the records it will touch on its course are either records it touched
465during the last pass or newly inserted page supremums. It can immediately
466access all these records, and when it arrives at the biggest record, it
467notices that the result set is complete. If the biggest record was removed,
468lock wait can occur because the next record only inherits a gap type lock,
469and a wait may be needed. Q.E.D. */
470
471/* If an index record should be changed or a new inserted, we must check
472the lock on the record or the next. When a read cursor starts reading,
473we will set a record level s-lock on each record it passes, except on the
474initial record on which the cursor is positioned before we start to fetch
475records. Our index tree search has the convention that the B-tree
476cursor is positioned BEFORE the first possibly matching record in
477the search. Optimizations are possible here: if the record is searched
478on an equality condition to a unique key, we could actually set a special
479lock on the record, a lock which would not prevent any insert before
480this record. In the next key locking an x-lock set on a record also
481prevents inserts just before that record.
482 There are special infimum and supremum records on each page.
483A supremum record can be locked by a read cursor. This record cannot be
484updated but the lock prevents insert of a user record to the end of
485the page.
486 Next key locks will prevent the phantom problem where new rows
487could appear to SELECT result sets after the select operation has been
488performed. Prevention of phantoms ensures the serializability of
489transactions.
490 What should we check if an insert of a new record is wanted?
491Only the lock on the next record on the same page, because also the
492supremum record can carry a lock. An s-lock prevents insertion, but
493what about an x-lock? If it was set by a searched update, then there
494is implicitly an s-lock, too, and the insert should be prevented.
495What if our transaction owns an x-lock to the next record, but there is
496a waiting s-lock request on the next record? If this s-lock was placed
497by a read cursor moving in the ascending order in the index, we cannot
498do the insert immediately, because when we finally commit our transaction,
499the read cursor should also see the newly inserted record. So we should
500move the read cursor backward from the next record for it to pass over
501the newly inserted record. This move backward may be too cumbersome to
502implement. If we in this situation just enqueue a second x-lock request
503for our transaction on the next record, then the deadlock mechanism
504notices a deadlock between our transaction and the s-lock request
505transaction. This seems to be an ok solution.
506 We could have the convention that granted explicit record locks,
507lock the corresponding records from changing, and also lock the gaps
508before them from inserting. A waiting explicit lock request locks the gap
509before from inserting. Implicit record x-locks, which we derive from the
510transaction id in the clustered index record, only lock the record itself
511from modification, not the gap before it from inserting.
512 How should we store update locks? If the search is done by a unique
513key, we could just modify the record trx id. Otherwise, we could put a record
514x-lock on the record. If the update changes ordering fields of the
515clustered index record, the inserted new record needs no record lock in
516lock table, the trx id is enough. The same holds for a secondary index
517record. Searched delete is similar to update.
518
519PROBLEM:
520What about waiting lock requests? If a transaction is waiting to make an
521update to a record which another modified, how does the other transaction
522know to send the end-lock-wait signal to the waiting transaction? If we have
523the convention that a transaction may wait for just one lock at a time, how
524do we preserve it if lock wait ends?
525
526PROBLEM:
527Checking the trx id label of a secondary index record. In the case of a
528modification, not an insert, is this necessary? A secondary index record
529is modified only by setting or resetting its deleted flag. A secondary index
530record contains fields to uniquely determine the corresponding clustered
531index record. A secondary index record is therefore only modified if we
532also modify the clustered index record, and the trx id checking is done
533on the clustered index record, before we come to modify the secondary index
534record. So, in the case of delete marking or unmarking a secondary index
535record, we do not have to care about trx ids, only the locks in the lock
536table must be checked. In the case of a select from a secondary index, the
537trx id is relevant, and in this case we may have to search the clustered
538index record.
539
540PROBLEM: How to update record locks when page is split or merged, or
541--------------------------------------------------------------------
542a record is deleted or updated?
543If the size of fields in a record changes, we perform the update by
544a delete followed by an insert. How can we retain the locks set or
545waiting on the record? Because a record lock is indexed in the bitmap
546by the heap number of the record, when we remove the record from the
547record list, it is possible still to keep the lock bits. If the page
548is reorganized, we could make a table of old and new heap numbers,
549and permute the bitmaps in the locks accordingly. We can add to the
550table a row telling where the updated record ended. If the update does
551not require a reorganization of the page, we can simply move the lock
552bits for the updated record to the position determined by its new heap
553number (we may have to allocate a new lock, if we run out of the bitmap
554in the old one).
555 A more complicated case is the one where the reinsertion of the
556updated record is done pessimistically, because the structure of the
557tree may change.
558
559PROBLEM: If a supremum record is removed in a page merge, or a record
560---------------------------------------------------------------------
561removed in a purge, what to do to the waiting lock requests? In a split to
562the right, we just move the lock requests to the new supremum. If a record
563is removed, we could move the waiting lock request to its inheritor, the
564next record in the index. But, the next record may already have lock
565requests on its own queue. A new deadlock check should be made then. Maybe
566it is easier just to release the waiting transactions. They can then enqueue
567new lock requests on appropriate records.
568
569PROBLEM: When a record is inserted, what locks should it inherit from the
570-------------------------------------------------------------------------
571upper neighbor? An insert of a new supremum record in a page split is
572always possible, but an insert of a new user record requires that the upper
573neighbor does not have any lock requests by other transactions, granted or
574waiting, in its lock queue. Solution: We can copy the locks as gap type
575locks, so that also the waiting locks are transformed to granted gap type
576locks on the inserted record. */
577
578/* LOCK COMPATIBILITY MATRIX
579 IS IX S X AI
580 IS + + + - +
581 IX + + - - +
582 S + - + - -
583 X - - - - -
584 AI + + - - -
585 *
586 Note that for rows, InnoDB only acquires S or X locks.
587 For tables, InnoDB normally acquires IS or IX locks.
588 S or X table locks are only acquired for LOCK TABLES.
589 Auto-increment (AI) locks are needed because of
590 statement-level MySQL binlog.
591 See also lock_mode_compatible().
592 */
593static const byte lock_compatibility_matrix[5][5] = {
594 /** IS IX S X AI */
595 /* IS */ {true, true, true, false, true},
596 /* IX */ {true, true, false, false, true},
597 /* S */ {true, false, true, false, false},
598 /* X */ {false, false, false, false, false},
599 /* AI */ {true, true, false, false, false}};
600
601/* STRONGER-OR-EQUAL RELATION (mode1=row, mode2=column)
602 IS IX S X AI
603 IS + - - - -
604 IX + + - - -
605 S + - + - -
606 X + + + + +
607 AI - - - - +
608 See lock_mode_stronger_or_eq().
609 */
610static const byte lock_strength_matrix[5][5] = {
611 /** IS IX S X AI */
612 /* IS */ {true, false, false, false, false},
613 /* IX */ {true, true, false, false, false},
614 /* S */ {true, false, true, false, false},
615 /* X */ {true, true, true, true, true},
616 /* AI */ {false, false, false, false, true}};
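Both tables are indexed by lock_mode in the order IS, IX, S, X, AI shown in the headers: lock_compatibility_matrix[m1][m2] answers "can m1 be granted while m2 is held by another transaction?", and lock_strength_matrix[m1][m2] answers "does holding m1 already cover m2?". A standalone mirror of the tables, written only to make the indexing concrete (the real lookups go through lock_mode_compatible() and lock_mode_stronger_or_eq() in lock0priv.ic):

#include <cassert>

// IS=0, IX=1, S=2, X=3, AI=4 ordering is assumed here only for illustration.
enum mode { IS, IX, S, X, AI };

static const bool compat[5][5] = {/* IS */ {true, true, true, false, true},
                                  /* IX */ {true, true, false, false, true},
                                  /* S  */ {true, false, true, false, false},
                                  /* X  */ {false, false, false, false, false},
                                  /* AI */ {true, true, false, false, false}};

static const bool stronger_or_eq[5][5] = {
    /* IS */ {true, false, false, false, false},
    /* IX */ {true, true, false, false, false},
    /* S  */ {true, false, true, false, false},
    /* X  */ {true, true, true, true, true},
    /* AI */ {false, false, false, false, true}};

int main() {
  assert(compat[IX][IX]);         // two trxs may hold IX on the same table
  assert(!compat[AI][AI]);        // AUTO-INC locks conflict with each other
  assert(stronger_or_eq[X][S]);   // X already covers S ...
  assert(!stronger_or_eq[S][X]);  // ... but not the other way around
  return 0;
}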
617
618/** Maximum depth of the DFS stack. */
619constexpr uint32_t MAX_STACK_SIZE = 4096;
620
621constexpr uint32_t PRDT_HEAPNO = PAGE_HEAP_NO_INFIMUM;
622/** Record locking request status */
623enum lock_rec_req_status {
624 /** Failed to acquire a lock */
625  LOCK_REC_FAIL,
626 /** Succeeded in acquiring a lock (implicit or already acquired) */
627  LOCK_REC_SUCCESS,
628 /** Explicitly created a new lock */
629  LOCK_REC_SUCCESS_CREATED
630};
631
632/**
633Record lock ID */
634struct RecID {
635 /** Constructor
636 @param[in] lock Record lock
637 @param[in] heap_no Heap number in the page */
638 RecID(const lock_t *lock, ulint heap_no)
639 : RecID(lock->rec_lock.page_id, heap_no) {
640 ut_ad(lock->is_record_lock());
641 }
642
643 /** Constructor
644 @param[in] page_id Tablespace ID and page number within space
645 @param[in] heap_no Heap number in the page */
646 RecID(page_id_t page_id, uint32_t heap_no)
647 : m_page_id(page_id),
648 m_heap_no(heap_no),
649 m_hash_value(lock_rec_hash_value(page_id)) {
650 ut_ad(m_page_id.space() < UINT32_MAX);
651 ut_ad(m_page_id.page_no() < UINT32_MAX);
652 ut_ad(m_heap_no < UINT32_MAX);
653 }
654
655 /** Constructor
656 @param[in] block Block in a tablespace
657 @param[in] heap_no Heap number in the block */
658 RecID(const buf_block_t *block, ulint heap_no)
659 : RecID(block->get_page_id(), heap_no) {}
660
661 /**
662 @return the hashed value of {space, page_no} */
663 uint64_t hash_value() const { return (m_hash_value); }
664
665 /** @return true if it's the supremum record */
666 bool is_supremum() const { return (m_heap_no == PAGE_HEAP_NO_SUPREMUM); }
667
668 /* Check if the rec id matches the lock instance.
669  @param[in] lock Lock to compare with
670 @return true if <space, page_no, heap_no> matches the lock. */
671 inline bool matches(const lock_t *lock) const;
672
673 const page_id_t &get_page_id() const { return m_page_id; }
674
675 /** Tablespace ID and page number within space */
676  page_id_t m_page_id;
677
678 /**
679 Heap number within the page */
680 uint32_t m_heap_no;
681
682 /**
683 Hash generated from record's location which will be used to get lock queue for
684 this record. */
685 uint64_t m_hash_value;
686};
687
688/**
689Create record locks */
690class RecLock {
691 public:
692 /**
693 @param[in,out] thr Transaction query thread requesting the record
694 lock
695 @param[in] index Index on which record lock requested
696 @param[in] rec_id Record lock tuple {space, page_no, heap_no}
697 @param[in] mode The lock mode */
698 RecLock(que_thr_t *thr, dict_index_t *index, const RecID &rec_id, ulint mode)
699 : m_thr(thr),
700 m_trx(thr_get_trx(thr)),
701 m_mode(mode),
702 m_index(index),
703 m_rec_id(rec_id) {
704 ut_ad(is_predicate_lock(m_mode));
705
706 init(nullptr);
707 }
708
709 /**
710 @param[in,out] thr Transaction query thread requesting the record
711 lock
712 @param[in] index Index on which record lock requested
713 @param[in] block Buffer page containing record
714 @param[in] heap_no Heap number within the block
715 @param[in] mode The lock mode */
716 RecLock(que_thr_t *thr, dict_index_t *index, const buf_block_t *block,
717 ulint heap_no, ulint mode)
718 : m_thr(thr),
719 m_trx(thr_get_trx(thr)),
720 m_mode(mode),
721 m_index(index),
722 m_rec_id(block, heap_no) {
723 btr_assert_not_corrupted(block, index);
724
725 init(block->frame);
726 }
727
728 /**
729 @param[in] index Index on which record lock requested
730 @param[in] rec_id Record lock tuple {space, page_no, heap_no}
731 @param[in] mode The lock mode */
732 RecLock(dict_index_t *index, const RecID &rec_id, ulint mode)
733 : m_thr(), m_trx(), m_mode(mode), m_index(index), m_rec_id(rec_id) {
734 ut_ad(is_predicate_lock(m_mode));
735
736 init(nullptr);
737 }
738
739 /**
740 @param[in] index Index on which record lock requested
741 @param[in] block Buffer page containing record
742 @param[in] heap_no Heap number within block
743 @param[in] mode The lock mode */
744 RecLock(dict_index_t *index, const buf_block_t *block, ulint heap_no,
745 ulint mode)
746 : m_thr(),
747 m_trx(),
748 m_mode(mode),
749 m_index(index),
750 m_rec_id(block, heap_no) {
751 btr_assert_not_corrupted(block, index);
752
753 init(block->frame);
754 }
755
756 /**
757 Enqueue a lock wait for a transaction. If it is a high priority transaction
758 (cannot rollback) then try to jump ahead in the record lock wait queue. Also
759  check if async rollback was requested for our trx.
760  @param[in, out] wait_for The lock that the joining transaction is
761 waiting for
762 @param[in] prdt Predicate [optional]
763 @return DB_LOCK_WAIT, DB_DEADLOCK, or DB_SUCCESS_LOCKED_REC
764 @retval DB_DEADLOCK means that async rollback was requested for our trx
765 @retval DB_SUCCESS_LOCKED_REC means that we are High Priority transaction and
766 we've managed to jump in front of other waiting
767 transactions and got the lock granted, so there
768 is no need to wait. */
769 dberr_t add_to_waitq(const lock_t *wait_for,
770 const lock_prdt_t *prdt = nullptr);
771
772 /**
773 Create a lock for a transaction and initialise it.
774 @param[in, out] trx Transaction requesting the new lock
775 @param[in] prdt Predicate lock (optional)
776 @return new lock instance */
777 lock_t *create(trx_t *trx, const lock_prdt_t *prdt = nullptr);
778
779 /**
780 Create the lock instance
781 @param[in, out] trx The transaction requesting the lock
782 @param[in, out] index Index on which record lock is required
783 @param[in] mode The lock mode desired
784 @param[in] rec_id The record id
785 @param[in] size Size of the lock + bitmap requested
786 @return a record lock instance */
787 static lock_t *lock_alloc(trx_t *trx, dict_index_t *index, ulint mode,
788 const RecID &rec_id, ulint size);
789
790 private:
791 /*
792 @return the record lock size in bytes */
793 size_t lock_size() const { return (m_size); }
794
795 /**
796 Do some checks and prepare for creating a new record lock */
797 void prepare() const;
798
799 /**
800 Setup the requesting transaction state for lock grant
801 @param[in,out] lock Lock for which to change state */
802 void set_wait_state(lock_t *lock);
803
804 /**
805 Add the lock to the record lock hash and the transaction's lock list
806 @param[in,out] lock Newly created record lock to add to the
807 rec hash and the transaction lock list */
808 void lock_add(lock_t *lock);
809
810 /**
811 Setup the context from the requirements */
812 void init(const page_t *page) {
813 ut_ad(locksys::owns_page_shard(m_rec_id.get_page_id()));
814    ut_ad(!srv_read_only_mode);
815 ut_ad(m_index->is_clustered() || !dict_index_is_online_ddl(m_index));
816 ut_ad(m_thr == nullptr || m_trx == thr_get_trx(m_thr));
817
818 m_size = is_predicate_lock(m_mode) ? lock_size(m_mode) : lock_size(page);
819
820 /** If rec is the supremum record, then we reset the
821 gap and LOCK_REC_NOT_GAP bits, as all locks on the
822 supremum are automatically of the gap type */
823
824 if (m_rec_id.m_heap_no == PAGE_HEAP_NO_SUPREMUM) {
825 ut_ad(!(m_mode & LOCK_REC_NOT_GAP));
826
827 m_mode &= ~(LOCK_GAP | LOCK_REC_NOT_GAP);
828 }
829 }
830
831 /**
832 Calculate the record lock physical size required for a predicate lock.
833 @param[in] mode For predicate locks the lock mode
834 @return the size of the lock data structure required in bytes */
835 static size_t lock_size(ulint mode) {
836 ut_ad(is_predicate_lock(mode));
837
838 /* The lock is always on PAGE_HEAP_NO_INFIMUM(0),
839 so we only need 1 bit (which is rounded up to 1
840 byte) for lock bit setting */
841
842 size_t n_bytes;
843
844 if (mode & LOCK_PREDICATE) {
845 const ulint align = UNIV_WORD_SIZE - 1;
846
847 /* We will attach the predicate structure
848 after lock. Make sure the memory is
849 aligned on 8 bytes, the mem_heap_alloc
850 will align it with MEM_SPACE_NEEDED
851 anyway. */
852
853 n_bytes = (1 + sizeof(lock_prdt_t) + align) & ~align;
854
855 /* This should hold now */
856
857 ut_ad(n_bytes == sizeof(lock_prdt_t) + UNIV_WORD_SIZE);
858
859 } else {
860 n_bytes = 1;
861 }
862
863 return (n_bytes);
864 }
865
866 /**
867 Calculate the record lock physical size required, non-predicate lock.
868 @param[in] page For non-predicate locks the buffer page
869 @return the size of the lock data structure required in bytes */
870 static size_t lock_size(const page_t *page) {
871    ulint n_recs = page_dir_get_n_heap(page);
872
873 /* Make lock bitmap bigger by a safety margin */
874
875 return (1 + ((n_recs + LOCK_PAGE_BITMAP_MARGIN) / 8));
876 }
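For a sense of the sizes involved (illustrative arithmetic only, assuming the LOCK_PAGE_BITMAP_MARGIN of 64 defined earlier in this file and an 8-byte UNIV_WORD_SIZE): a non-predicate lock on a page whose heap currently holds 100 entries reserves 1 + ((100 + 64) / 8) = 21 bytes of bitmap, i.e. 168 bits, so roughly 68 more records can appear on the page before a lock with a larger bitmap must be created. A predicate lock, by contrast, always reserves sizeof(lock_prdt_t) + UNIV_WORD_SIZE bytes, because (1 + sizeof(lock_prdt_t) + 7) & ~7 rounds the single bitmap byte up to a full word so that the lock_prdt_t attached after the lock stays word-aligned, which is exactly what the assertion in the predicate overload checks.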
877
878 /**
879 @return true if the requested lock mode is for a predicate
880 or page lock */
881  static bool is_predicate_lock(ulint mode) {
882 return (mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
883 }
884
885 private:
886 /** The query thread of the transaction */
887  que_thr_t *m_thr;
888
889 /**
890 Transaction requesting the record lock */
891  trx_t *m_trx;
892
893 /**
894 Lock mode requested */
895  ulint m_mode;
896
897 /**
898 Size of the record lock in bytes */
899 size_t m_size;
900
901 /**
902 Index on which the record lock is required */
903  dict_index_t *m_index;
904
905 /**
906 The record lock tuple {space, page_no, heap_no} */
907  RecID m_rec_id;
908};
909
910#ifdef UNIV_DEBUG
911/** The count of the types of locks. */
912static const ulint lock_types = UT_ARR_SIZE(lock_compatibility_matrix);
913#endif /* UNIV_DEBUG */
914
915/** Gets the type of a lock.
916 @return LOCK_TABLE or LOCK_REC */
917static inline uint32_t lock_get_type_low(const lock_t *lock); /*!< in: lock */
918
919/** Cancels a waiting lock request and releases possible other transactions
920waiting behind it.
921@param[in,out] trx The transaction waiting for a lock */
922void lock_cancel_waiting_and_release(trx_t *trx);
923
924/** This function is a wrapper around several functions which need to be called
925in particular order to wake up a transaction waiting for a lock.
926You should not call lock_wait_release_thread_if_suspended(thr) directly,
927but rather use this wrapper, as this makes it much easier to reason about all
928possible states in which lock, trx, and thr can be.
929It makes sure that trx is woken up exactly once, and only if it already went to
930sleep.
931@param[in, out] lock The lock for which lock->trx is waiting */
932void lock_reset_wait_and_release_thread_if_suspended(lock_t *lock);
933
934/** Checks if some transaction has an implicit x-lock on a record in a clustered
935index.
936@param[in] rec User record.
937@param[in] index Clustered index.
938@param[in] offsets rec_get_offsets(rec, index)
939@return transaction id of the transaction which has the x-lock, or 0 */
940[[nodiscard]] static inline trx_id_t lock_clust_rec_some_has_impl(
941 const rec_t *rec, const dict_index_t *index, const ulint *offsets);
942
943/** Gets the number of bits in a record lock bitmap.
944@param[in] lock The record lock
945@return number of bits */
946static inline uint32_t lock_rec_get_n_bits(const lock_t *lock);
947
948/** Sets the nth bit of a record lock to true.
949@param[in] lock record lock
950@param[in] i index of the bit */
951static inline void lock_rec_set_nth_bit(lock_t *lock, ulint i);
952
953/** Gets the mode of a lock.
954 @return mode */
955static inline enum lock_mode lock_get_mode(const lock_t *lock); /*!< in: lock */
956
957/** Calculates if lock mode 1 is compatible with lock mode 2.
958@param[in] mode1 lock mode
959@param[in] mode2 lock mode
960@return nonzero if mode1 compatible with mode2 */
961static inline ulint lock_mode_compatible(enum lock_mode mode1,
962 enum lock_mode mode2);
963
964/** Calculates if lock mode 1 is stronger or equal to lock mode 2.
965@param[in] mode1 lock mode 1
966@param[in] mode2 lock mode 2
967@return true iff mode1 stronger or equal to mode2 */
968static inline bool lock_mode_stronger_or_eq(enum lock_mode mode1,
969 enum lock_mode mode2);
970
971/** Gets the wait flag of a lock.
972 @return LOCK_WAIT if waiting, 0 if not */
973static inline ulint lock_get_wait(const lock_t *lock); /*!< in: lock */
974
975/** Checks if a transaction has the specified table lock, or stronger. This
976function should only be called by the thread that owns the transaction.
977This function acquires trx->mutex which protects trx->lock.trx_locks, but you
978should understand that this only makes it easier to argue against races at the
979level of access to the data structure, yet does not buy us any protection at
980the higher level of making actual decisions based on the result of this call -
981it may happen that another thread is removing a table lock,
982and even though lock_table_has returned true to the caller, the lock is no
983longer in possession of trx once the caller gets to evaluate if/else condition
984based on the result.
985Therefore it is up to caller to make sure that the context of the call to this
986function and making any decisions based on the result is protected from any
987concurrent modifications. This in turn makes the whole trx_mutex_enter/exit
988a bit redundant, but it does not affect performance yet makes the reasoning
989about data structure a bit easier and protects trx->lock.trx_locks data
990structure from corruption in case our high level reasoning about absence of
991parallel modifications turns out wrong.
992@param[in] trx transaction
993@param[in] table table
994@param[in] mode lock mode
995@return true iff the transaction holds a table lock of the given mode or a stronger one */
996static inline bool lock_table_has(const trx_t *trx, const dict_table_t *table,
997 enum lock_mode mode);
998
999/** Handles writing the information about found deadlock to the log files
1000and caches it for future lock_latest_err_file() calls (for example used by
1001SHOW ENGINE INNODB STATUS)
1002@param[in] trxs_on_cycle trxs causing deadlock, i-th waits for i+1-th
1003@param[in] victim_trx the trx from trxs_on_cycle which will be rolled back */
1004void lock_notify_about_deadlock(const ut::vector<const trx_t *> &trxs_on_cycle,
1005 const trx_t *victim_trx);
1006
1007#include "lock0priv.ic"
1008
1009namespace locksys {
1010class Unsafe_global_latch_manipulator {
1011 public:
1012  static void exclusive_unlatch() { lock_sys->latches.global_latch.x_unlock(); }
1013 static void exclusive_latch(ut::Location location) {
1014    lock_sys->latches.global_latch.x_lock(location);
1015 }
1016};
1017
1018/** Temporarily releases trx->mutex, latches the lock-sys shard containing
1019peeked_lock and latches trx->mutex again and calls f under protection of both
1020latches. The latch on lock-sys shard will be released immediately after f
1021returns. It is the responsibility of the caller to handle the shared lock-sys
1022latch and trx->mutex, and to verify inside f that the trx has not finished and
1023that the lock was not released meanwhile.
1024@param[in] peeked_lock A lock of the trx. (While trx->mutex is held it can't
1025 be freed, but can be released). It is used to
1026 determine the lock-sys shard to latch.
1027@param[in] f The callback to call once the lock-sys shard is
1028 latched and trx->mutex is relatched.
1029@return The value returned by f.
1030*/
1031template <typename F>
1032auto latch_peeked_shard_and_do(const lock_t *peeked_lock, F &&f) {
1034 const trx_t *trx = peeked_lock->trx;
1035 ut_ad(trx_mutex_own(trx));
1036 ut_ad(peeked_lock->trx == trx);
1037 /* peeked_wait_lock points to a lock struct which will not be freed while we
1038 hold trx->mutex. Thus it is safe to inspect the peeked_wait_lock's
1039 rec_lock.page_id and tab_lock.table. We have to make a copy of them, though,
1040 before releasing trx->mutex. */
1041 if (peeked_lock->is_record_lock()) {
1042 const auto sharded_by = peeked_lock->rec_lock.page_id;
1043 trx_mutex_exit(trx);
1044 DEBUG_SYNC_C("try_relatch_trx_and_shard_and_do_noted_expected_version");
1045    Shard_naked_latch_guard guard{UT_LOCATION_HERE, sharded_by};
1046    trx_mutex_enter(trx);
1047 return std::forward<F>(f)();
1048 } else {
1049    /* Once we release the trx->mutex, the trx may release its locks on the
1050    table and commit, which in an extreme case could lead to freeing the
1051    dict_table_t object, so we have to copy its id first. */
1052 const auto sharded_by = peeked_lock->tab_lock.table->id;
1053 trx_mutex_exit(trx);
1054    Shard_naked_latch_guard guard{UT_LOCATION_HERE, sharded_by};
1055    trx_mutex_enter(trx);
1056 return std::forward<F>(f)();
1057 }
1058}
1059
1060/** Given a pointer to trx (which the caller guarantees will not be freed) and
1061the expected value of trx->version, calls the provided function f, but only if
1062the trx is still in the expected version and still waiting for a lock, within a
1063critical section which holds latches on the trx and on the shard containing the
1064waiting lock. If the transaction has meanwhile finished waiting for the lock,
1065committed, or rolled back, then f will not be called.
1066It may happen that the lock for which the trx is waiting during execution of f
1067is not the same as the lock it was waiting for at the moment of invocation.
1068@param[in] trx_version The version of the trx that we intend to wake up
1069@param[in] f The callback to call if trx is still waiting for a
1070 lock and is still in version trx_version
1071*/
1072template <typename F>
1073void run_if_waiting(const TrxVersion trx_version, F &&f) {
1074 const trx_t *trx = trx_version.m_trx;
1075 /* This code would be much simpler with Global_exclusive_latch_guard.
1076  Unfortunately, this led to long semaphore waits when thousands of
1077 transactions were taking thousands of locks and timing out. Therefore we use
1078 the following tricky code to instead only latch the single shard which
1079 contains the trx->lock.wait_lock. This is a bit difficult, because during
1080 B-tree reorganization a record lock might be removed from one page and moved
1081 to another, temporarily setting wait_lock to nullptr. This should be very
1082 rare and short. In most cases this while loop should do just one iteration
1083 and proceed along a happy path through all ifs. Another reason wait_lock
1084 might become nullptr is because we were granted the lock meanwhile, in which
1085 case the trx->lock.blocking_trx is first set to nullptr */
1086 do {
1087 if (!trx->lock.wait_lock.load()) {
1088 continue;
1089 }
1090    Global_shared_latch_guard shared_latch_guard{UT_LOCATION_HERE};
1091 /* We can't use IB_mutex_guard with trx->mutex, as trx_mutex_enter has
1092 custom logic. We want to release trx->mutex before ut_delay or return. */
1093 trx_mutex_enter(trx);
1094 auto guard = create_scope_guard([trx]() { trx_mutex_exit(trx); });
1095 if (trx->version != trx_version.m_version) {
1096 return;
1097 }
1098 if (const lock_t *peeked_wait_lock = trx->lock.wait_lock.load()) {
1099 const bool retry = latch_peeked_shard_and_do(peeked_wait_lock, [&]() {
1100 ut_ad(trx_mutex_own(trx));
1101 if (trx->version != trx_version.m_version) {
1102 return false;
1103 }
1104 if (peeked_wait_lock != trx->lock.wait_lock.load()) {
1105 /* If wait_lock has changed, then in case of record lock it might have
1106 been moved during B-tree reorganization, so we retry. In case of a
1107 table lock the wait_lock can not be "moved" so it had to be released
1108 permanently and there's no point in retrying.*/
1109 return peeked_wait_lock->is_record_lock();
1110 }
1111 std::forward<F>(f)();
1112 ut_ad(trx_mutex_own(trx));
1113 return false;
1114 });
1115 if (!retry) {
1116 return;
1117 }
1118 }
1119 /* wait_lock appears to be null. If blocking_trx isn't nullptr, then
1120 probably the wait_lock will soon be restored, otherwise we can give up */
1121 } while (trx->lock.blocking_trx.load() && ut_delay(10));
1122}
1123} // namespace locksys
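A hedged usage sketch (the helper below is hypothetical, not code from the lock module): a caller that captured a TrxVersion earlier can probe whether that same incarnation of the transaction is still blocked; the lambda runs only inside run_if_waiting()'s critical section, i.e. with trx->mutex and the waited-for lock's shard latched.

// Hypothetical helper, for illustration only.
static bool still_waiting_for_a_lock(const TrxVersion trx_version) {
  bool waiting = false;
  locksys::run_if_waiting(trx_version, [&]() { waiting = true; });
  return waiting;
}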
1124
1125template <typename F>
1126lock_t *Locks_hashtable::find_in_cell(size_t cell_id, F &&f) {
1127 lock_t *lock = (lock_t *)hash_get_first(ht.get(), cell_id);
1128 while (lock != nullptr) {
1130 // f(lock) might remove the lock from list, so we must save the next pointer
1131 lock_t *next = lock->hash;
1132 if (std::forward<F>(f)(lock)) {
1133 return lock;
1134 }
1135 lock = next;
1136 }
1137 return nullptr;
1138}
1139
1140template <typename F>
1141lock_t *Locks_hashtable::find_on_page(page_id_t page_id, F &&f) {
1143 return find_in_cell(hash_calc_cell_id(lock_rec_hash_value(page_id), ht.get()),
1144 [&](lock_t *lock) {
1145 return (lock->rec_lock.page_id == page_id) &&
1146 std::forward<F>(f)(lock);
1147 });
1148}
1149
1150template <typename F>
1151lock_t *Locks_hashtable::find_on_block(const buf_block_t *block, F &&f) {
1152 return find_on_page(block->get_page_id(), std::forward<F>(f));
1153}
1154
1155template <typename F>
1156lock_t *Locks_hashtable::find_on_record(const struct RecID &rec_id, F &&f) {
1157 return find_in_cell(hash_calc_cell_id(rec_id.hash_value(), ht.get()),
1158 [&](lock_t *lock) {
1159 return rec_id.matches(lock) && std::forward<F>(f)(lock);
1160 });
1161}
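A hedged usage sketch of the lookup helpers (the function below is hypothetical and only illustrates the calling convention): find the first waiting lock of some other transaction on a given record, assuming the caller already holds the lock-sys shard latch for that page, as the rest of this module does.

// Hypothetical helper, for illustration only; assumes the page's lock-sys
// shard is already latched by the caller.
static const lock_t *first_waiting_lock_of_another_trx(
    const page_id_t &page_id, uint32_t heap_no, const trx_t *trx) {
  const RecID rec_id{page_id, heap_no};
  return lock_sys->rec_hash.find_on_record(rec_id, [&](lock_t *lock) {
    return lock->is_waiting() && lock->trx != trx;
  });
}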
1162#ifdef UNIV_DEBUG
1163template <typename F>
1164lock_t *Locks_hashtable::find(F &&f) {
1166 for (size_t cell_id = lock_sys->rec_hash.get_n_cells(); cell_id--;) {
1167 if (auto lock =
1168 lock_sys->rec_hash.find_in_cell(cell_id, std::forward<F>(f));
1169 lock != nullptr) {
1170 return lock;
1171 }
1172 }
1173 return nullptr;
1174}
1175#endif /*UNIV_DEBUG*/
1176#endif /* lock0priv_h */
static mysql_service_status_t init()
Component initialization.
Definition: audit_api_message_emit.cc:571
void btr_assert_not_corrupted(const buf_block_t *block, const dict_index_t *index)
Assert that a B-tree page is not corrupted.
Definition: btr0btr.h:152
A simple bitset wrapper class, which lets you access an existing range of bytes (not owned by it!...
Definition: ut0bitset.h:54
Create record locks.
Definition: lock0priv.h:690
ulint m_mode
Lock mode requested.
Definition: lock0priv.h:895
RecLock(dict_index_t *index, const buf_block_t *block, ulint heap_no, ulint mode)
Definition: lock0priv.h:744
RecID m_rec_id
The record lock tuple {space, page_no, heap_no}.
Definition: lock0priv.h:907
RecLock(que_thr_t *thr, dict_index_t *index, const RecID &rec_id, ulint mode)
Definition: lock0priv.h:698
static bool is_predicate_lock(ulint mode)
Definition: lock0priv.h:881
size_t lock_size() const
Definition: lock0priv.h:793
static size_t lock_size(const page_t *page)
Calculate the record lock physical size required, non-predicate lock.
Definition: lock0priv.h:870
RecLock(que_thr_t *thr, dict_index_t *index, const buf_block_t *block, ulint heap_no, ulint mode)
Definition: lock0priv.h:716
static size_t lock_size(ulint mode)
Calculate the record lock physical size required for a predicate lock.
Definition: lock0priv.h:835
size_t m_size
Size of the record lock in bytes.
Definition: lock0priv.h:899
trx_t * m_trx
Transaction requesting the record lock.
Definition: lock0priv.h:891
void init(const page_t *page)
Setup the context from the requirements.
Definition: lock0priv.h:812
que_thr_t * m_thr
The query thread of the transaction.
Definition: lock0priv.h:887
dict_index_t * m_index
Index on which the record lock is required.
Definition: lock0priv.h:903
RecLock(dict_index_t *index, const RecID &rec_id, ulint mode)
Definition: lock0priv.h:732
A RAII helper which latches global_latch in shared mode during constructor, and unlatches it during d...
Definition: lock0guards.h:71
void x_lock(ut::Location location)
Definition: lock0latches.h:141
void x_unlock()
Definition: lock0latches.h:142
Unique_sharded_rw_lock global_latch
Definition: lock0latches.h:264
A RAII helper which latches the mutex protecting given shard during constructor, and unlatches it dur...
Definition: lock0guards.h:91
Definition: lock0priv.h:1010
static void exclusive_latch(ut::Location location)
Definition: lock0priv.h:1013
static void exclusive_unlatch()
Definition: lock0priv.h:1012
Page identifier.
Definition: buf0types.h:207
int page
Definition: ctype-mb.cc:1236
dberr_t
Definition: db0err.h:39
static bool dict_index_is_online_ddl(const dict_index_t *index)
Determines if a secondary index is being or has been created online, or if the table is being rebuilt...
Data dictionary global types.
#define DEBUG_SYNC_C(_sync_point_name_)
Definition: my_sys.h:209
The simple hash table utility.
static void *& hash_get_first(hash_table_t *table, size_t cell_id)
Gets the first struct in a hash chain, NULL if none.
Definition: hash0hash.h:159
static uint64_t hash_calc_cell_id(uint64_t hash_value, hash_table_t const *table)
Calculates the cell index from a hashed value for a specified hash table.
#define UINT32_MAX
Definition: lexyy.cc:86
constexpr uint32_t LOCK_PRDT_PAGE
Page lock.
Definition: lock0lock.h:987
constexpr uint32_t LOCK_MODE_MASK
Lock modes and types.
Definition: lock0lock.h:949
constexpr uint32_t LOCK_PREDICATE
Predicate lock.
Definition: lock0lock.h:985
constexpr uint32_t LOCK_WAIT
Waiting lock flag; when set, it means that the lock has not yet been granted, it is just waiting for ...
Definition: lock0lock.h:962
static uint64_t lock_rec_hash_value(const page_id_t &page_id)
Calculates the hash value of a page file address: used in inserting or searching for a lock in the ha...
constexpr uint32_t LOCK_ORDINARY
this flag denotes an ordinary next-key lock in contrast to LOCK_GAP or LOCK_REC_NOT_GAP
Definition: lock0lock.h:966
constexpr uint32_t LOCK_TYPE_MASK
mask used to extract lock type from the type_mode field in a lock
Definition: lock0lock.h:956
constexpr uint32_t LOCK_INSERT_INTENTION
this bit is set when we place a waiting gap type record lock request in order to let an insert of an ...
Definition: lock0lock.h:983
constexpr uint32_t LOCK_TABLE
Lock types.
Definition: lock0lock.h:952
lock_sys_t * lock_sys
The lock system.
Definition: lock0lock.cc:197
constexpr uint32_t LOCK_GAP
when this bit is set, it means that the lock holds only on the gap before the record; for instance,...
Definition: lock0lock.h:971
constexpr uint32_t LOCK_REC_NOT_GAP
this bit means that the lock is only on the index record and does NOT block inserts to the gap before...
Definition: lock0lock.h:977
constexpr uint32_t LOCK_REC
record lock
Definition: lock0lock.h:954
static Locks_hashtable & lock_hash_get(ulint mode)
Get the lock hash table.
Definition: lock0lock.ic:69
struct lock_prdt lock_prdt_t
static ulint lock_mode_compatible(enum lock_mode mode1, enum lock_mode mode2)
Calculates if lock mode 1 is compatible with lock mode 2.
static const byte lock_strength_matrix[5][5]
Definition: lock0priv.h:610
static bool lock_mode_is_next_key_lock(ulint mode)
Checks if the mode is LOCK_S or LOCK_X (possibly ORed with LOCK_WAIT or LOCK_REC) which means the loc...
Definition: lock0priv.h:119
constexpr uint32_t MAX_STACK_SIZE
Maximum depth of the DFS stack.
Definition: lock0priv.h:619
void lock_reset_wait_and_release_thread_if_suspended(lock_t *lock)
This function is a wrapper around several functions which need to be called in particular order to wa...
Definition: lock0wait.cc:421
static const ulint lock_types
The count of the types of locks.
Definition: lock0priv.h:912
static const byte lock_compatibility_matrix[5][5]
Definition: lock0priv.h:593
constexpr uint32_t PRDT_HEAPNO
Definition: lock0priv.h:621
static bool lock_mode_stronger_or_eq(enum lock_mode mode1, enum lock_mode mode2)
Calculates if lock mode 1 is stronger or equal to lock mode 2.
void lock_notify_about_deadlock(const ut::vector< const trx_t * > &trxs_on_cycle, const trx_t *victim_trx)
Handles writing the information about found deadlock to the log files and caches it for future lock_l...
Definition: lock0lock.cc:6310
static const ulint LOCK_PAGE_BITMAP_MARGIN
Definition: lock0priv.h:342
static enum lock_mode lock_get_mode(const lock_t *lock)
Gets the mode of a lock.
std::ostream & operator<<(std::ostream &out, const lock_table_t &lock)
The global output operator is overloaded to conveniently print the lock_table_t object into the given...
Definition: lock0priv.h:79
static ulint lock_get_wait(const lock_t *lock)
Gets the wait flag of a lock.
bool lock_print_waits
static trx_id_t lock_clust_rec_some_has_impl(const rec_t *rec, const dict_index_t *index, const ulint *offsets)
Checks if some transaction has an implicit x-lock on a record in a clustered index.
static bool lock_rec_get_nth_bit(const lock_t *lock, ulint i)
Gets the nth bit of a record lock.
void lock_cancel_waiting_and_release(trx_t *trx)
Cancels a waiting lock request and releases possible other transactions waiting behind it.
Definition: lock0lock.cc:5855
static uint32_t lock_get_type_low(const lock_t *lock)
Gets the type of a lock.
static bool lock_table_has(const trx_t *trx, const dict_table_t *table, enum lock_mode mode)
Checks if a transaction has the specified table lock, or stronger.
lock_rec_req_status
Record locking request status.
Definition: lock0priv.h:623
@ LOCK_REC_FAIL
Failed to acquire a lock.
Definition: lock0priv.h:625
@ LOCK_REC_SUCCESS
Succeeded in acquiring a lock (implicit or already acquired)
Definition: lock0priv.h:627
@ LOCK_REC_SUCCESS_CREATED
Explicitly created a new lock.
Definition: lock0priv.h:629
static void lock_rec_set_nth_bit(lock_t *lock, ulint i)
Sets the nth bit of a record lock to true.
static uint32_t lock_rec_get_n_bits(const lock_t *lock)
Gets the number of bits in a record lock bitmap.
Lock module internal inline methods.
const char * lock_mode_string(enum lock_mode mode)
Convert the given enum value into string.
Definition: lock0types.h:69
lock_mode
Definition: lock0types.h:54
@ LOCK_S
Definition: lock0types.h:57
@ LOCK_X
Definition: lock0types.h:58
#define lock_t
Definition: lock0types.h:39
unsigned long long int ulonglong
Definition: my_inttypes.h:56
static HashTable ht
Definition: mysql.cc:148
Provides atomic access in shared-exclusive modes.
Definition: shared_spin_lock.h:79
Definition: lock0guards.h:34
bool owns_page_shard(const page_id_t &page_id)
Tests if given page shard can be safely accessed by the current thread.
Definition: lock0lock.cc:171
auto latch_peeked_shard_and_do(const lock_t *peeked_lock, F &&f)
Temporarily releases trx->mutex, latches the lock-sys shard containing peeked_lock and latches trx->m...
Definition: lock0priv.h:1032
void run_if_waiting(const TrxVersion trx_version, F &&f)
Given a pointer to trx (which the caller guarantees will not be freed) and the expected value of trx-...
Definition: lock0priv.h:1073
bool owns_shared_global_latch()
Tests if lock_sys latch is owned in shared mode by the current thread.
Definition: lock0lock.cc:167
bool owns_lock_shard(const lock_t *lock)
Checks if shard which contains lock is latched (or that an exclusive latch on whole lock_sys is held)...
Definition: lock0lock.cc:179
bool owns_exclusive_global_latch()
Tests if lock_sys latch is exclusively owned by the current thread.
Definition: lock0lock.cc:163
static mysql_service_status_t create(const char *service_names[], reference_caching_channel *out_channel) noexcept
Definition: component.cc:36
Definition: gcs_xcom_synode.h:64
mode
Definition: file_handle.h:60
bool wait_for(TCondition cond, std::chrono::steady_clock::duration max_wait)
Delays execution for at most max_wait or returns earlier if cond becomes true.
Definition: ut0ut.ic:131
std::basic_ostringstream< char, std::char_traits< char >, ut::allocator< char > > ostringstream
Specialization of basic_ostringstream which uses ut::allocator.
Definition: ut0new.h:2871
std::vector< T, ut::allocator< T > > vector
Specialization of vector which uses allocator.
Definition: ut0new.h:2875
static std::mutex lock
Definition: net_ns.cc:56
static uint16_t page_dir_get_n_heap(const page_t *page)
Gets the number of records in the heap.
constexpr ulint PAGE_HEAP_NO_INFIMUM
Page infimum.
Definition: page0types.h:131
constexpr ulint PAGE_HEAP_NO_SUPREMUM
Page supremum.
Definition: page0types.h:133
byte page_t
Type of the index page.
Definition: page0types.h:152
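Taken together, the entries above fix the heap numbering on an index page: 0 is the infimum, 1 the supremum, and user records follow. A hedged sketch (page is a placeholder page_t pointer):

    const ulint n_heap = page_dir_get_n_heap(page);
    for (ulint heap_no = PAGE_HEAP_NO_SUPREMUM + 1; heap_no < n_heap; ++heap_no) {
      /* heap_no identifies one record slot on this page; the bit with the
         same index in a record lock's bitmap covers it */
    }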
static trx_t * thr_get_trx(que_thr_t *thr)
Gets the trx of a query thread.
byte rec_t
Definition: rem0types.h:41
required string type
Definition: replication_group_member_actions.proto:34
Scope_guard< TLambda > create_scope_guard(const TLambda rollback_lambda)
Create a scope guard object.
Definition: scope_guard.h:113
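A minimal hedged sketch of create_scope_guard() from scope_guard.h (which this header includes); the rollback lambda runs when the guard leaves scope:

    {
      auto guard = create_scope_guard([&]() {
        /* undo any partial work if this scope is left early */
      });
      /* ... work that may return on error paths ... */
    } /* the rollback lambda runs when guard is destroyed */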
bool srv_read_only_mode
Set if InnoDB must operate in read-only mode.
Definition: srv0srv.cc:198
A hashmap used by lock sys, to organize locks by page (block), so that it is easy to maintain a list ...
Definition: lock0lock.h:1018
lock_t * find_on_block(const buf_block_t *block, F &&f)
Definition: lock0priv.h:1151
lock_t * find(F &&f)
Definition: lock0priv.h:1164
lock_t * find_on_page(page_id_t page_id, F &&f)
Definition: lock0priv.h:1141
size_t get_n_cells()
Definition: lock0lock.h:1047
lock_t * find_on_record(const struct RecID &rec_id, F &&f)
Definition: lock0priv.h:1156
lock_t * find_in_cell(size_t cell_id, F &&f)
Definition: lock0priv.h:1126
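A hedged sketch of the find_* helpers above, assuming the callback receives a lock_t* and returns true for the lock to be selected, and that the shard covering the page is already latched:

    lock_t *waiter = lock_sys->rec_hash.find_on_page(
        page_id, [](lock_t *lock) { return lock->is_waiting(); });
    if (waiter != nullptr) {
      /* some transaction is enqueued waiting in this page's lock queues */
    }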
Record lock ID.
Definition: lock0priv.h:634
bool is_supremum() const
Definition: lock0priv.h:666
uint64_t m_hash_value
Hash generated from record's location which will be used to get lock queue for this record.
Definition: lock0priv.h:685
uint64_t hash_value() const
Definition: lock0priv.h:663
const page_id_t & get_page_id() const
Definition: lock0priv.h:673
RecID(page_id_t page_id, uint32_t heap_no)
Constructor.
Definition: lock0priv.h:646
RecID(const buf_block_t *block, ulint heap_no)
Constructor.
Definition: lock0priv.h:658
uint32_t m_heap_no
Heap number within the page.
Definition: lock0priv.h:680
page_id_t m_page_id
Tablespace ID and page number within space
Definition: lock0priv.h:676
RecID(const lock_t *lock, ulint heap_no)
Constructor.
Definition: lock0priv.h:638
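RecID bundles the page id and heap number that identify one lockable record; a hedged sketch constructing one for the page supremum:

    RecID rec_id(page_id, static_cast<uint32_t>(PAGE_HEAP_NO_SUPREMUM));
    ut_ad(rec_id.is_supremum());
    ut_ad(rec_id.get_page_id() == page_id);
    const uint64_t cell_hash = rec_id.hash_value(); /* selects the lock queue */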
Definition: lock0priv.h:283
static const ut_list_node< lock_t > & get_node(const lock_t &lock)
Functor for accessing the embedded node within a table lock.
Definition: lock0priv.h:285
Definition: trx0types.h:635
uint64_t m_version
Definition: trx0types.h:639
trx_t * m_trx
Definition: trx0types.h:638
The buffer control block structure.
Definition: buf0buf.h:1690
const page_id_t & get_page_id() const
Get the page number and space id of the current buffer block.
Definition: buf0buf.h:1864
byte * frame
pointer to buffer frame which is of size UNIV_PAGE_SIZE, and aligned to an address divisible by UNIV_...
Definition: buf0buf.h:1706
Data structure for an index.
Definition: dict0mem.h:1046
Data structure for a database table.
Definition: dict0mem.h:1909
table_id_t id
Id of the table.
Definition: dict0mem.h:1964
table_name_t name
Table name.
Definition: dict0mem.h:1978
Definition: lock0prdt.h:40
Record lock for a page.
Definition: lock0priv.h:84
std::ostream & print(std::ostream &out) const
Print the record lock into the given output stream.
Definition: lock0priv.h:102
uint32_t n_bits
number of bits in the lock bitmap; Must be divisible by 8.
Definition: lock0priv.h:91
page_id_t page_id
The id of the page on which records referenced by this lock's bitmap are located.
Definition: lock0priv.h:87
locksys::Latches latches
The latches protecting queues of record and table locks.
Definition: lock0lock.h:1071
Locks_hashtable rec_hash
The hash table of the record (LOCK_REC) locks, except for predicate (LOCK_PREDICATE) and predicate pa...
Definition: lock0lock.h:1075
Lock struct; protected by lock_sys latches.
Definition: lock0priv.h:137
bool is_record_not_gap() const
Definition: lock0priv.h:204
Bitset< const byte > bitset() const
This is an overloaded member function, provided for convenience. It differs from the above function o...
Definition: lock0priv.h:257
dict_index_t * index
Index for a record lock.
Definition: lock0priv.h:145
lock_table_t tab_lock
Table lock.
Definition: lock0priv.h:153
Bitset< byte > bitset()
Gets access to the LOCK_REC's bitmap, which indicates heap_no-s, which are the subject of this lock r...
Definition: lock0priv.h:277
Locks_hashtable & hash_table() const
Get lock hash table.
Definition: lock0priv.h:231
lock_mode mode() const
Definition: lock0priv.h:225
const char * type_string() const
Definition: lock0priv.h:246
lock_rec_t rec_lock
Record lock.
Definition: lock0priv.h:156
bool includes_supremum() const
Definition: lock0priv.h:217
bool is_gap() const
Definition: lock0priv.h:201
ulonglong m_psi_internal_thread_id
Performance schema thread that created the lock.
Definition: lock0priv.h:162
bool is_insert_intention() const
Definition: lock0priv.h:212
bool is_record_lock() const
Determine if the lock object is a record lock.
Definition: lock0priv.h:189
trx_que_t trx_que_state() const
Definition: lock0priv.h:234
std::ostream & print(std::ostream &out) const
Print the lock object into the given output stream.
Definition: lock0priv.h:317
bool is_waiting() const
Definition: lock0priv.h:198
lock_t * hash
Hash chain node for a record lock.
Definition: lock0priv.h:149
ulonglong m_psi_event_id
Performance schema event that created the lock.
Definition: lock0priv.h:165
uint32_t type_mode
The lock type and mode bit flags.
Definition: lock0priv.h:171
trx_t * trx
transaction owning the lock
Definition: lock0priv.h:139
uint64_t m_seq
Timestamp when the lock was created.
Definition: lock0priv.h:175
bool is_next_key_lock() const
Definition: lock0priv.h:207
bool is_predicate() const
Determine if it is predicate lock.
Definition: lock0priv.h:193
uint32_t type() const
Definition: lock0priv.h:222
UT_LIST_NODE_T(lock_t) trx_locks
list of the locks of the transaction
void unlock_gap_lock()
Unlock the GAP Lock part of this Next Key Lock.
Definition: lock0priv.h:179
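The predicate members of lock_t listed above decode type_mode; a hedged sketch of classifying a lock with them (lock_classify is a hypothetical helper; the owning shard is assumed latched):

    static const char *lock_classify(const lock_t *lock) {
      if (!lock->is_record_lock()) return "table lock";
      if (lock->is_predicate()) return "predicate lock";
      if (lock->is_insert_intention()) return "insert intention lock";
      if (lock->is_next_key_lock()) return "next-key lock (record + gap)";
      if (lock->is_gap()) return "gap lock";
      if (lock->is_record_not_gap()) return "record-only lock";
      return "record lock";
    }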
A table lock.
Definition: lock0priv.h:54
std::ostream & print(std::ostream &out) const
Print the table lock into the given output stream.
Definition: lock0priv.h:69
dict_table_t * table
database table in dictionary cache
Definition: lock0priv.h:55
locks
list of locks on the same table
Definition: lock0priv.h:58
Definition: que0que.h:242
std::atomic< trx_t * > blocking_trx
If this transaction is waiting for a lock, then blocking_trx points to a transaction which holds a co...
Definition: trx0trx.h:439
std::atomic< lock_t * > wait_lock
The lock request that this transaction is waiting for.
Definition: trx0trx.h:460
trx_que_t que_state
valid when trx->state == TRX_STATE_ACTIVE: TRX_QUE_RUNNING, TRX_QUE_LOCK_WAIT, ...
Definition: trx0trx.h:407
Definition: trx0trx.h:675
std::atomic_uint64_t version
Version of this instance.
Definition: trx0trx.h:1074
trx_lock_t lock
Information about the transaction locks and state.
Definition: trx0trx.h:822
Definition: ut0core.h:33
The two way list node.
Definition: ut0lst.h:50
#define trx_mutex_enter_first_of_two(t)
Acquire the trx->mutex (and indicate we might request one more).
Definition: trx0trx.h:1385
#define trx_mutex_exit(t)
Release the trx->mutex.
Definition: trx0trx.h:1388
#define trx_mutex_enter(t)
Acquire the trx->mutex (and promise not to request any more).
Definition: trx0trx.h:1382
bool trx_mutex_own(const trx_t *trx)
Test if trx->mutex is owned by the current thread.
Definition: trx0trx.h:1345
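A minimal hedged sketch of the trx->mutex protocol implied by the macros above (trx is a placeholder):

    trx_mutex_enter(trx);
    ut_ad(trx_mutex_own(trx));
    const bool waiting = (trx->lock.que_state == TRX_QUE_LOCK_WAIT);
    trx_mutex_exit(trx);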
Transaction system global type definitions.
trx_que_t
Transaction execution states when trx->state == TRX_STATE_ACTIVE.
Definition: trx0types.h:71
ib_id_t trx_id_t
Transaction identifier (DB_TRX_ID, DATA_TRX_ID)
Definition: trx0types.h:138
Version control for database, common definitions, and include files.
constexpr size_t UNIV_WORD_SIZE
MySQL config.h generated by CMake will define SIZEOF_LONG in Posix.
Definition: univ.i:278
unsigned long int ulint
Definition: univ.i:406
#define UT_ARR_SIZE(a)
Definition: univ.i:524
Utilities for bitset operations.
#define UT_LOCATION_HERE
Definition: ut0core.h:47
#define ut_error
Abort execution.
Definition: ut0dbg.h:65
#define ut_ad(EXPR)
Debug assertion.
Definition: ut0dbg.h:69
#define UT_LIST_NODE_GETTER_DEFINITION(t, m)
A helper for the UT_LIST_BASE_NODE_T_EXTERN which declares a node getter struct which extracts member...
Definition: ut0lst.h:270
#define UT_LIST_NODE_T(t)
Macro used for legacy reasons.
Definition: ut0lst.h:64
ulint ut_delay(ulint delay)
Runs an idle loop on CPU.
Definition: ut0ut.cc:99
static task_env * retry
Definition: xcom_base.cc:436
static void prepare(pax_msg *p, pax_op op)
Definition: xcom_base.cc:1587