/* MySQL 8.4.0
   Source Code Documentation
   Go to the documentation of this file. */
/*****************************************************************************
Copyright (c) 2007, 2024, Oracle and/or its affiliates.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.

This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*****************************************************************************/
/** @file include/lock0priv.h
 Lock module internal structures and methods.

 Created July 12, 2007 Vasil Dimov
 *******************************************************/
34#ifndef lock0priv_h
35#define lock0priv_h
38/* If you need to access members of the structures defined in this
39file, please write appropriate functions that retrieve them and put
40those functions in lock/ */
41#error Do not include lock0priv.h outside of the lock/ module
#include "dict0types.h"
#include "hash0hash.h"
#include "trx0types.h"
#include "univ.i"

#include <scope_guard.h>

#include <ostream>
#include <sstream>
#include <string>
#include <utility>
52/** A table lock */
54 dict_table_t *table; /*!< database table in dictionary
55 cache */
57 locks; /*!< list of locks on the same
58 table */
59 /** Print the table lock into the given output stream
60 @param[in,out] out the output stream
61 @return the given output stream. */
62 std::ostream &print(std::ostream &out) const;
65/** Print the table lock into the given output stream
66@param[in,out] out the output stream
67@return the given output stream. */
68inline std::ostream &lock_table_t::print(std::ostream &out) const {
69 out << "[lock_table_t: name=" << table->name << "]";
70 return (out);
73/** The global output operator is overloaded to conveniently
74print the lock_table_t object into the given output stream.
75@param[in,out] out the output stream
76@param[in] lock the table lock
77@return the given output stream */
78inline std::ostream &operator<<(std::ostream &out, const lock_table_t &lock) {
79 return (lock.print(out));
82/** Record lock for a page */
83struct lock_rec_t {
84 /** The id of the page on which records referenced by this lock's bitmap are
85 located. */
87 /** number of bits in the lock bitmap;
88 Must be divisible by 8.
89 NOTE: the lock bitmap is placed immediately after the lock struct */
90 uint32_t n_bits;
92 /** Print the record lock into the given output stream
93 @param[in,out] out the output stream
94 @return the given output stream. */
95 std::ostream &print(std::ostream &out) const;
98/** Print the record lock into the given output stream
99@param[in,out] out the output stream
100@return the given output stream. */
101inline std::ostream &lock_rec_t::print(std::ostream &out) const {
102 return out << "[lock_rec_t: page_id=" << page_id << ", n_bits=" << n_bits
103 << "]";
106inline std::ostream &operator<<(std::ostream &out, const lock_rec_t &lock) {
107 return (lock.print(out));
111Checks if the `mode` is LOCK_S or LOCK_X (possibly ORed with LOCK_WAIT or
112LOCK_REC) which means the lock is a
113Next Key Lock, a.k.a. LOCK_ORDINARY, as opposed to Predicate Lock,
114GAP lock, Insert Intention or Record Lock.
115@param mode A mode and flags, of a lock.
116@return true iff the only bits set in `mode` are LOCK_S or LOCK_X and optionally
119 static_assert(LOCK_ORDINARY == 0, "LOCK_ORDINARY must be 0 (no flags)");
120 ut_ad((mode & LOCK_TABLE) == 0);
121 mode &= ~(LOCK_WAIT | LOCK_REC);
122 ut_ad((mode & LOCK_WAIT) == 0);
123 ut_ad((mode & LOCK_TYPE_MASK) == 0);
125 (mode == LOCK_S || mode == LOCK_X));
126 return (mode & ~(LOCK_MODE_MASK)) == LOCK_ORDINARY;
/** Gets the nth bit of a record lock bitmap.
@param[in] lock record lock
@param[in] i index of the bit
@return true if the i-th bit is set; false if it is clear, and also
false when i == ULINT_UNDEFINED */
static inline bool lock_rec_get_nth_bit(const lock_t *lock, ulint i);
135/** Lock struct; protected by lock_sys latches */
136struct lock_t {
137 /** transaction owning the lock */
140 /** list of the locks of the transaction */
143 /** Index for a record lock */
146 /** Hash chain node for a record lock. The link node in a singly
147 linked list, used by the hash table. */
150 union {
151 /** Table lock */
154 /** Record lock */
156 };
160 /** Performance schema thread that created the lock. */
163 /** Performance schema event that created the lock. */
168 /** The lock type and mode bit flags.
170 uint32_t type_mode;
172#if defined(UNIV_DEBUG)
173 /** Timestamp when it was created. */
174 uint64_t m_seq;
175#endif /* UNIV_DEBUG */
177 /** Unlock the GAP Lock part of this Next Key Lock */
179 ut_ad(!is_gap());
180 ut_ad(!is_insert_intention());
181 ut_ad(is_next_key_lock());
183 type_mode |= LOCK_REC_NOT_GAP;
184 }
186 /** Determine if the lock object is a record lock.
187 @return true if record lock, false otherwise. */
188 bool is_record_lock() const { return (type() == LOCK_REC); }
190 /** Determine if it is predicate lock.
191 @return true if predicate lock, false otherwise. */
192 bool is_predicate() const {
193 return (type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
194 }
196 /** @return true if the lock wait flag is set */
197 bool is_waiting() const { return (type_mode & LOCK_WAIT); }
199 /** @return true if the gap lock bit is set */
200 bool is_gap() const { return (type_mode & LOCK_GAP); }
202 /** @return true if the not gap lock bit is set */
203 bool is_record_not_gap() const { return (type_mode & LOCK_REC_NOT_GAP); }
205 /** @return true iff the lock is a Next Key Lock */
206 bool is_next_key_lock() const {
207 return is_record_lock() && lock_mode_is_next_key_lock(type_mode);
208 }
210 /** @return true if the insert intention bit is set */
211 bool is_insert_intention() const {
212 return (type_mode & LOCK_INSERT_INTENTION);
213 }
215 /** @return true iff this lock is (at least) on supremum pseudo-record */
216 bool includes_supremum() const {
218 }
220 /** @return the lock mode */
221 uint32_t type() const { return (type_mode & LOCK_TYPE_MASK); }
223 /** @return the precise lock mode */
224 lock_mode mode() const {
225 return (static_cast<lock_mode>(type_mode & LOCK_MODE_MASK));
226 }
228 /** Get lock hash table
229 @return lock hash table */
230 hash_table_t *hash_table() const { return (lock_hash_get(type_mode)); }
232 /** @return the transaction's query thread state. */
233 trx_que_t trx_que_state() const { return (trx->lock.que_state); }
235 /** Print the lock object into the given output stream.
236 @param[in,out] out the output stream
237 @return the given output stream. */
238 std::ostream &print(std::ostream &out) const;
240 /** Convert the member 'type_mode' into a human readable string.
241 @return human readable string */
242 std::string type_mode_string() const;
244 /* @return the string/text representation of the record type. */
245 const char *type_string() const {
246 switch (type_mode & LOCK_TYPE_MASK) {
247 case LOCK_REC:
248 return ("LOCK_REC");
249 case LOCK_TABLE:
250 return ("LOCK_TABLE");
251 default:
252 ut_error;
253 }
254 }
259/** Convert the member 'type_mode' into a human readable string.
260@return human readable string */
261inline std::string lock_t::type_mode_string() const {
263 sout << type_string();
264 sout << " | " << lock_mode_string(mode());
266 if (is_record_not_gap()) {
267 sout << " | LOCK_REC_NOT_GAP";
268 }
270 if (is_waiting()) {
271 sout << " | LOCK_WAIT";
272 }
274 if (is_gap()) {
275 sout << " | LOCK_GAP";
276 }
278 if (is_insert_intention()) {
279 sout << " | LOCK_INSERT_INTENTION";
280 }
281 return (sout.str());
284inline std::ostream &lock_t::print(std::ostream &out) const {
285 out << "[lock_t: type_mode=" << type_mode << "(" << type_mode_string() << ")";
287 if (is_record_lock()) {
288 out << rec_lock;
289 } else {
290 out << tab_lock;
291 }
293 out << "]";
294 return (out);
297inline std::ostream &operator<<(std::ostream &out, const lock_t &lock) {
298 return (lock.print(out));
#ifdef UNIV_DEBUG
/** Debug-build flag; NOTE(review): presumably enables printing of lock-wait
diagnostics in the lock module — confirm at its definition in lock/. */
extern bool lock_print_waits;
#endif /* UNIV_DEBUG */
305/* Safety margin when creating a new record lock: this many extra records
306can be inserted to the page without need to create a lock with a bigger
307bitmap */
311/* An explicit record lock affects both the record and the gap before it.
312An implicit x-lock does not affect the gap, it only locks the index
313record from read or update.
315If a transaction has modified or inserted an index record, then
316it owns an implicit x-lock on the record. On a secondary index record,
317a transaction has an implicit x-lock also if it has modified the
318clustered index record, the max trx id of the page where the secondary
319index record resides is >= trx id of the transaction (or database recovery
320is running), and there are no explicit non-gap lock requests on the
321secondary index record.
323This complicated definition for a secondary index comes from the
324implementation: we want to be able to determine if a secondary index
325record has an implicit x-lock, just by looking at the present clustered
326index record, not at the historical versions of the record. The
327complicated definition can be explained to the user so that there is
328nondeterminism in the access path when a query is answered: we may,
329or may not, access the clustered index record and thus may, or may not,
330bump into an x-lock set there.
332Different transaction can have conflicting locks set on the gap at the
333same time. The locks on the gap are purely inhibitive: an insert cannot
334be made, or a select cursor may have to wait if a different transaction
335has a conflicting lock on the gap. An x-lock on the gap does not give
336the right to insert into the gap.
338An explicit lock can be placed on a user record or the supremum record of
339a page. The locks on the supremum record are always thought to be of the gap
340type, though the gap bit is not set. When we perform an update of a record
341where the size of the record changes, we may temporarily store its explicit
342locks on the infimum record of the page, though the infimum otherwise never
343carries locks.
345A waiting record lock can also be of the gap type. A waiting lock request
346can be granted when there is no conflicting mode lock request by another
347transaction ahead of it in the explicit lock queue.
349In version 4.0.5 we added yet another explicit lock type: LOCK_REC_NOT_GAP.
350It only locks the record it is placed on, not the gap before the record.
351This lock type is necessary to emulate an Oracle-like READ COMMITTED isolation
355RULE 1: If there is an implicit x-lock on a record, and there are non-gap
357lock requests waiting in the queue, then the transaction holding the implicit
358x-lock also has an explicit non-gap record x-lock. Therefore, as locks are
359released, we can grant locks to waiting lock requests purely by looking at
360the explicit lock requests in the queue.
362RULE 3: Different transactions cannot have conflicting granted non-gap locks
364on a record at the same time. However, they can have conflicting granted gap
RULE 4: If there is a waiting lock request in a queue, no lock request,
368gap or not, can be inserted ahead of it in the queue. In record deletes
369and page splits new gap type locks can be created by the database manager
370for a transaction, and without rule 4, the waits-for graph of transactions
371might become cyclic without the database noticing it, as the deadlock check
372is only performed when a transaction itself requests a lock!
375An insert is allowed to a gap if there are no explicit lock requests by
376other transactions on the next record. It does not matter if these lock
377requests are granted or waiting, gap bit set or not, with the exception
378that a gap type request set by another transaction to wait for
379its turn to do an insert is ignored. On the other hand, an
380implicit x-lock by another transaction does not prevent an insert, which
381allows for more concurrency when using an Oracle-style sequence number
382generator for the primary key with many transactions doing inserts
385A modify of a record is allowed if the transaction has an x-lock on the
386record, or if other transactions do not have any non-gap lock requests on the
389A read of a single user record with a cursor is allowed if the transaction
390has a non-gap explicit, or an implicit lock on the record, or if the other
391transactions have no x-lock requests on the record. At a page supremum a
392read is always allowed.
394In summary, an implicit lock is seen as a granted x-lock only on the
395record, not on the gap. An explicit lock with no gap bit set is a lock
396both on the record and the gap. If the gap bit is set, the lock is only
397on the gap. Different transaction cannot own conflicting locks on the
398record at the same time, but they may own conflicting locks on the gap.
399Granted locks on a record give an access right to the record, but gap type
400locks just inhibit operations.
402NOTE: Finding out if some transaction has an implicit x-lock on a secondary
403index record can be cumbersome. We may have to look at previous versions of
404the corresponding clustered index record to find out if a delete marked
405secondary index record was delete marked by an active transaction, not by
406a committed one.
408FACT A: If a transaction has inserted a row, it can delete it any time
409without need to wait for locks.
411PROOF: The transaction has an implicit x-lock on every index record inserted
412for the row, and can thus modify each record without the need to wait. Q.E.D.
414FACT B: If a transaction has read some result set with a cursor, it can read
415it again, and retrieves the same result set, if it has not modified the
416result set in the meantime. Hence, there is no phantom problem. If the
417biggest record, in the alphabetical order, touched by the cursor is removed,
418a lock wait may occur, otherwise not.
420PROOF: When a read cursor proceeds, it sets an s-lock on each user record
421it passes, and a gap type s-lock on each page supremum. The cursor must
422wait until it has these locks granted. Then no other transaction can
423have a granted x-lock on any of the user records, and therefore cannot
424modify the user records. Neither can any other transaction insert into
425the gaps which were passed over by the cursor. Page splits and merges,
426and removal of obsolete versions of records do not affect this, because
427when a user record or a page supremum is removed, the next record inherits
428its locks as gap type locks, and therefore blocks inserts to the same gap.
429Also, if a page supremum is inserted, it inherits its locks from the successor
430record. When the cursor is positioned again at the start of the result set,
431the records it will touch on its course are either records it touched
432during the last pass or new inserted page supremums. It can immediately
433access all these records, and when it arrives at the biggest record, it
434notices that the result set is complete. If the biggest record was removed,
435lock wait can occur because the next record only inherits a gap type lock,
436and a wait may be needed. Q.E.D. */
438/* If an index record should be changed or a new inserted, we must check
439the lock on the record or the next. When a read cursor starts reading,
440we will set a record level s-lock on each record it passes, except on the
441initial record on which the cursor is positioned before we start to fetch
442records. Our index tree search has the convention that the B-tree
443cursor is positioned BEFORE the first possibly matching record in
444the search. Optimizations are possible here: if the record is searched
445on an equality condition to a unique key, we could actually set a special
446lock on the record, a lock which would not prevent any insert before
447this record. In the next key locking an x-lock set on a record also
448prevents inserts just before that record.
449 There are special infimum and supremum records on each page.
A supremum record can be locked by a read cursor. This record cannot be
451updated but the lock prevents insert of a user record to the end of
452the page.
453 Next key locks will prevent the phantom problem where new rows
454could appear to SELECT result sets after the select operation has been
performed. Prevention of phantoms ensures the serializability of
457 What should we check if an insert of a new record is wanted?
458Only the lock on the next record on the same page, because also the
459supremum record can carry a lock. An s-lock prevents insertion, but
460what about an x-lock? If it was set by a searched update, then there
461is implicitly an s-lock, too, and the insert should be prevented.
462What if our transaction owns an x-lock to the next record, but there is
463a waiting s-lock request on the next record? If this s-lock was placed
464by a read cursor moving in the ascending order in the index, we cannot
465do the insert immediately, because when we finally commit our transaction,
466the read cursor should see also the new inserted record. So we should
467move the read cursor backward from the next record for it to pass over
468the new inserted record. This move backward may be too cumbersome to
469implement. If we in this situation just enqueue a second x-lock request
470for our transaction on the next record, then the deadlock mechanism
471notices a deadlock between our transaction and the s-lock request
472transaction. This seems to be an ok solution.
473 We could have the convention that granted explicit record locks,
474lock the corresponding records from changing, and also lock the gaps
475before them from inserting. A waiting explicit lock request locks the gap
476before from inserting. Implicit record x-locks, which we derive from the
477transaction id in the clustered index record, only lock the record itself
478from modification, not the gap before it from inserting.
479 How should we store update locks? If the search is done by a unique
480key, we could just modify the record trx id. Otherwise, we could put a record
481x-lock on the record. If the update changes ordering fields of the
482clustered index record, the inserted new record needs no record lock in
483lock table, the trx id is enough. The same holds for a secondary index
484record. Searched delete is similar to update.
487What about waiting lock requests? If a transaction is waiting to make an
488update to a record which another modified, how does the other transaction
489know to send the end-lock-wait signal to the waiting transaction? If we have
490the convention that a transaction may wait for just one lock at a time, how
491do we preserve it if lock wait ends?
494Checking the trx id label of a secondary index record. In the case of a
495modification, not an insert, is this necessary? A secondary index record
496is modified only by setting or resetting its deleted flag. A secondary index
497record contains fields to uniquely determine the corresponding clustered
498index record. A secondary index record is therefore only modified if we
499also modify the clustered index record, and the trx id checking is done
500on the clustered index record, before we come to modify the secondary index
501record. So, in the case of delete marking or unmarking a secondary index
502record, we do not have to care about trx ids, only the locks in the lock
503table must be checked. In the case of a select from a secondary index, the
504trx id is relevant, and in this case we may have to search the clustered
505index record.
507PROBLEM: How to update record locks when page is split or merged, or
509a record is deleted or updated?
510If the size of fields in a record changes, we perform the update by
511a delete followed by an insert. How can we retain the locks set or
512waiting on the record? Because a record lock is indexed in the bitmap
513by the heap number of the record, when we remove the record from the
514record list, it is possible still to keep the lock bits. If the page
515is reorganized, we could make a table of old and new heap numbers,
516and permute the bitmaps in the locks accordingly. We can add to the
517table a row telling where the updated record ended. If the update does
518not require a reorganization of the page, we can simply move the lock
519bits for the updated record to the position determined by its new heap
520number (we may have to allocate a new lock, if we run out of the bitmap
521in the old one).
522 A more complicated case is the one where the reinsertion of the
523updated record is done pessimistically, because the structure of the
524tree may change.
526PROBLEM: If a supremum record is removed in a page merge, or a record
528removed in a purge, what to do to the waiting lock requests? In a split to
529the right, we just move the lock requests to the new supremum. If a record
530is removed, we could move the waiting lock request to its inheritor, the
531next record in the index. But, the next record may already have lock
532requests on its own queue. A new deadlock check should be made then. Maybe
533it is easier just to release the waiting transactions. They can then enqueue
534new lock requests on appropriate records.
536PROBLEM: When a record is inserted, what locks should it inherit from the
538upper neighbor? An insert of a new supremum record in a page split is
539always possible, but an insert of a new user record requires that the upper
540neighbor does not have any lock requests by other transactions, granted or
541waiting, in its lock queue. Solution: We can copy the locks as gap type
542locks, so that also the waiting locks are transformed to granted gap type
543locks on the inserted record. */
/* LOCK COMPATIBILITY MATRIX (mode of the held lock = row,
 * mode of the requested lock = column; '+' = compatible)
 *    IS IX S  X  AI
 * IS +  +  +  -  +
 * IX +  +  -  -  +
 * S  +  -  +  -  -
 * X  -  -  -  -  -
 * AI +  +  -  -  -
 *
 * Note that for rows, InnoDB only acquires S or X locks.
 * For tables, InnoDB normally acquires IS or IX locks.
 * S or X table locks are only acquired for LOCK TABLES.
 * Auto-increment (AI) locks are needed because of
 * statement-level MySQL binlog.
 * See also lock_mode_compatible().
 */
static const byte lock_compatibility_matrix[5][5] = {
    /** IS IX S X AI */
    /* IS */ {true, true, true, false, true},
    /* IX */ {true, true, false, false, true},
    /* S */ {true, false, true, false, false},
    /* X */ {false, false, false, false, false},
    /* AI */ {true, true, false, false, false}};
/* STRONGER-OR-EQUAL RELATION (mode1=row, mode2=column;
 * '+' = mode1 is at least as strong as mode2)
 *    IS IX S  X  AI
 * IS +  -  -  -  -
 * IX +  +  -  -  -
 * S  +  -  +  -  -
 * X  +  +  +  +  +
 * AI -  -  -  -  +
 * See lock_mode_stronger_or_eq().
 */
static const byte lock_strength_matrix[5][5] = {
    /** IS IX S X AI */
    /* IS */ {true, false, false, false, false},
    /* IX */ {true, true, false, false, false},
    /* S */ {true, false, true, false, false},
    /* X */ {true, true, true, true, true},
    /* AI */ {false, false, false, false, true}};
/** Maximum depth of the DFS stack.
NOTE(review): the DFS consumer is not visible in this file — presumably the
lock-sys graph traversal; confirm at the use site before tuning. */
constexpr uint32_t MAX_STACK_SIZE = 4096;
/** Record locking request status */
enum lock_rec_req_status {
  /** Failed to acquire a lock */
  LOCK_REC_FAIL,

  /** Succeeded in acquiring a lock (implicit or already acquired) */
  LOCK_REC_SUCCESS,

  /** Explicitly created a new lock */
  LOCK_REC_SUCCESS_CREATED
};
600Record lock ID */
601struct RecID {
602 /** Constructor
603 @param[in] lock Record lock
604 @param[in] heap_no Heap number in the page */
605 RecID(const lock_t *lock, ulint heap_no)
606 : RecID(lock->rec_lock.page_id, heap_no) {
607 ut_ad(lock->is_record_lock());
608 }
610 /** Constructor
611 @param[in] page_id Tablespace ID and page number within space
612 @param[in] heap_no Heap number in the page */
613 RecID(page_id_t page_id, uint32_t heap_no)
614 : m_page_id(page_id),
615 m_heap_no(heap_no),
616 m_hash_value(lock_rec_hash_value(page_id)) {
617 ut_ad( < UINT32_MAX);
618 ut_ad(m_page_id.page_no() < UINT32_MAX);
619 ut_ad(m_heap_no < UINT32_MAX);
620 }
622 /** Constructor
623 @param[in] block Block in a tablespace
624 @param[in] heap_no Heap number in the block */
625 RecID(const buf_block_t *block, ulint heap_no)
626 : RecID(block->get_page_id(), heap_no) {}
628 /**
629 @return the hashed value of {space, page_no} */
630 uint64_t hash_value() const { return (m_hash_value); }
632 /** @return true if it's the supremum record */
633 bool is_supremum() const { return (m_heap_no == PAGE_HEAP_NO_SUPREMUM); }
635 /* Check if the rec id matches the lock instance.
636 @param[i] lock Lock to compare with
637 @return true if <space, page_no, heap_no> matches the lock. */
638 inline bool matches(const lock_t *lock) const;
640 const page_id_t &get_page_id() const { return m_page_id; }
642 /** Tablespace ID and page number within space */
645 /**
646 Heap number within the page */
647 uint32_t m_heap_no;
649 /**
650 Hash generated from record's location which will be used to get lock queue for
651 this record. */
652 uint64_t m_hash_value;
656Create record locks */
657class RecLock {
658 public:
659 /**
660 @param[in,out] thr Transaction query thread requesting the record
661 lock
662 @param[in] index Index on which record lock requested
663 @param[in] rec_id Record lock tuple {space, page_no, heap_no}
664 @param[in] mode The lock mode */
665 RecLock(que_thr_t *thr, dict_index_t *index, const RecID &rec_id, ulint mode)
666 : m_thr(thr),
667 m_trx(thr_get_trx(thr)),
668 m_mode(mode),
669 m_index(index),
670 m_rec_id(rec_id) {
671 ut_ad(is_predicate_lock(m_mode));
673 init(nullptr);
674 }
676 /**
677 @param[in,out] thr Transaction query thread requesting the record
678 lock
679 @param[in] index Index on which record lock requested
680 @param[in] block Buffer page containing record
681 @param[in] heap_no Heap number within the block
682 @param[in] mode The lock mode */
683 RecLock(que_thr_t *thr, dict_index_t *index, const buf_block_t *block,
684 ulint heap_no, ulint mode)
685 : m_thr(thr),
686 m_trx(thr_get_trx(thr)),
687 m_mode(mode),
688 m_index(index),
689 m_rec_id(block, heap_no) {
690 btr_assert_not_corrupted(block, index);
692 init(block->frame);
693 }
695 /**
696 @param[in] index Index on which record lock requested
697 @param[in] rec_id Record lock tuple {space, page_no, heap_no}
698 @param[in] mode The lock mode */
699 RecLock(dict_index_t *index, const RecID &rec_id, ulint mode)
700 : m_thr(), m_trx(), m_mode(mode), m_index(index), m_rec_id(rec_id) {
701 ut_ad(is_predicate_lock(m_mode));
703 init(nullptr);
704 }
706 /**
707 @param[in] index Index on which record lock requested
708 @param[in] block Buffer page containing record
709 @param[in] heap_no Heap number within block
710 @param[in] mode The lock mode */
711 RecLock(dict_index_t *index, const buf_block_t *block, ulint heap_no,
712 ulint mode)
713 : m_thr(),
714 m_trx(),
715 m_mode(mode),
716 m_index(index),
717 m_rec_id(block, heap_no) {
718 btr_assert_not_corrupted(block, index);
720 init(block->frame);
721 }
723 /**
724 Enqueue a lock wait for a transaction. If it is a high priority transaction
725 (cannot rollback) then try to jump ahead in the record lock wait queue. Also
726 check if async rollback was request for our trx.
727 @param[in, out] wait_for The lock that the the joining transaction is
728 waiting for
729 @param[in] prdt Predicate [optional]
731 @retval DB_DEADLOCK means that async rollback was requested for our trx
732 @retval DB_SUCCESS_LOCKED_REC means that we are High Priority transaction and
733 we've managed to jump in front of other waiting
734 transactions and got the lock granted, so there
735 is no need to wait. */
736 dberr_t add_to_waitq(const lock_t *wait_for,
737 const lock_prdt_t *prdt = nullptr);
739 /**
740 Create a lock for a transaction and initialise it.
741 @param[in, out] trx Transaction requesting the new lock
742 @param[in] prdt Predicate lock (optional)
743 @return new lock instance */
744 lock_t *create(trx_t *trx, const lock_prdt_t *prdt = nullptr);
746 /**
747 Create the lock instance
748 @param[in, out] trx The transaction requesting the lock
749 @param[in, out] index Index on which record lock is required
750 @param[in] mode The lock mode desired
751 @param[in] rec_id The record id
752 @param[in] size Size of the lock + bitmap requested
753 @return a record lock instance */
754 static lock_t *lock_alloc(trx_t *trx, dict_index_t *index, ulint mode,
755 const RecID &rec_id, ulint size);
757 private:
758 /*
759 @return the record lock size in bytes */
760 size_t lock_size() const { return (m_size); }
762 /**
763 Do some checks and prepare for creating a new record lock */
764 void prepare() const;
766 /**
767 Setup the requesting transaction state for lock grant
768 @param[in,out] lock Lock for which to change state */
769 void set_wait_state(lock_t *lock);
771 /**
772 Add the lock to the record lock hash and the transaction's lock list
773 @param[in,out] lock Newly created record lock to add to the
774 rec hash and the transaction lock list */
775 void lock_add(lock_t *lock);
777 /**
778 Setup the context from the requirements */
779 void init(const page_t *page) {
780 ut_ad(locksys::owns_page_shard(m_rec_id.get_page_id()));
782 ut_ad(m_index->is_clustered() || !dict_index_is_online_ddl(m_index));
783 ut_ad(m_thr == nullptr || m_trx == thr_get_trx(m_thr));
785 m_size = is_predicate_lock(m_mode) ? lock_size(m_mode) : lock_size(page);
787 /** If rec is the supremum record, then we reset the
788 gap and LOCK_REC_NOT_GAP bits, as all locks on the
789 supremum are automatically of the gap type */
791 if (m_rec_id.m_heap_no == PAGE_HEAP_NO_SUPREMUM) {
792 ut_ad(!(m_mode & LOCK_REC_NOT_GAP));
794 m_mode &= ~(LOCK_GAP | LOCK_REC_NOT_GAP);
795 }
796 }
798 /**
799 Calculate the record lock physical size required for a predicate lock.
800 @param[in] mode For predicate locks the lock mode
801 @return the size of the lock data structure required in bytes */
802 static size_t lock_size(ulint mode) {
803 ut_ad(is_predicate_lock(mode));
805 /* The lock is always on PAGE_HEAP_NO_INFIMUM(0),
806 so we only need 1 bit (which is rounded up to 1
807 byte) for lock bit setting */
809 size_t n_bytes;
811 if (mode & LOCK_PREDICATE) {
812 const ulint align = UNIV_WORD_SIZE - 1;
814 /* We will attach the predicate structure
815 after lock. Make sure the memory is
816 aligned on 8 bytes, the mem_heap_alloc
817 will align it with MEM_SPACE_NEEDED
818 anyway. */
820 n_bytes = (1 + sizeof(lock_prdt_t) + align) & ~align;
822 /* This should hold now */
824 ut_ad(n_bytes == sizeof(lock_prdt_t) + UNIV_WORD_SIZE);
826 } else {
827 n_bytes = 1;
828 }
830 return (n_bytes);
831 }
833 /**
834 Calculate the record lock physical size required, non-predicate lock.
835 @param[in] page For non-predicate locks the buffer page
836 @return the size of the lock data structure required in bytes */
837 static size_t lock_size(const page_t *page) {
840 /* Make lock bitmap bigger by a safety margin */
842 return (1 + ((n_recs + LOCK_PAGE_BITMAP_MARGIN) / 8));
843 }
845 /**
846 @return true if the requested lock mode is for a predicate
847 or page lock */
849 return (mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
850 }
852 private:
853 /** The query thread of the transaction */
856 /**
857 Transaction requesting the record lock */
860 /**
861 Lock mode requested */
864 /**
865 Size of the record lock in bytes */
866 size_t m_size;
868 /**
869 Index on which the record lock is required */
872 /**
873 The record lock tuple {space, page_no, heap_no} */
877#ifdef UNIV_DEBUG
878/** The count of the types of locks. */
880#endif /* UNIV_DEBUG */
882/** Gets the type of a lock.
883 @return LOCK_TABLE or LOCK_REC */
884static inline uint32_t lock_get_type_low(const lock_t *lock); /*!< in: lock */
886/** Gets the previous record lock set on a record.
887 @return previous lock on the same record, NULL if none exists */
889 const lock_t *in_lock, /*!< in: record lock */
890 ulint heap_no); /*!< in: heap number of the record */
892/** Cancels a waiting lock request and releases possible other transactions
893waiting behind it.
894@param[in,out] trx The transaction waiting for a lock */
897/** This function is a wrapper around several functions which need to be called
898in particular order to wake up a transaction waiting for a lock.
899You should not call lock_wait_release_thread_if_suspended(thr) directly,
900but rather use this wrapper, as this makes it much easier to reason about all
901possible states in which lock, trx, and thr can be.
902It makes sure that trx is woken up exactly once, and only if it already went to
904@param[in, out] lock The lock for which lock->trx is waiting */
907/** Checks if some transaction has an implicit x-lock on a record in a clustered
909@param[in] rec User record.
910@param[in] index Clustered index.
911@param[in] offsets rec_get_offsets(rec, index)
912@return transaction id of the transaction which has the x-lock, or 0 */
913[[nodiscard]] static inline trx_id_t lock_clust_rec_some_has_impl(
914 const rec_t *rec, const dict_index_t *index, const ulint *offsets);
916/** Gets the first or next record lock on a page.
917 @return next lock, NULL if none exists */
919 const lock_t *lock); /*!< in: a record lock */
921/** Gets the number of bits in a record lock bitmap.
922@param[in] lock The record lock
923@return number of bits */
924static inline uint32_t lock_rec_get_n_bits(const lock_t *lock);
926/** Sets the nth bit of a record lock to true.
927@param[in] lock record lock
928@param[in] i index of the bit */
929static inline void lock_rec_set_nth_bit(lock_t *lock, ulint i);
931/** Gets the first or next record lock on a page.
932 @return next lock, NULL if none exists */
934 lock_t *lock); /*!< in: a record lock */
936/** Gets the first record lock on a page, where the page is identified by its
937file address.
938@param[in] lock_hash lock hash table
939@param[in] page_id specifies space id and page number of the page
940@return first lock, NULL if none exists */
942 const page_id_t &page_id);
944/** Gets the first record lock on a page, where the page is identified by a
945pointer to it.
946@param[in] lock_hash lock hash table
947@param[in] block buffer block
948@return first lock, NULL if none exists */
950 const buf_block_t *block);
952/** Gets the next explicit lock request on a record.
953@param[in] heap_no heap number of the record
954@param[in] lock lock
955@return next lock, NULL if none exists or if heap_no == ULINT_UNDEFINED */
956static inline lock_t *lock_rec_get_next(ulint heap_no, lock_t *lock);
958/** Gets the next explicit lock request on a record.
959@param[in] heap_no heap number of the record
960@param[in] lock lock
961@return next lock, NULL if none exists or if heap_no == ULINT_UNDEFINED */
962static inline const lock_t *lock_rec_get_next_const(ulint heap_no,
963 const lock_t *lock);
965/** Gets the first explicit lock request on a record.
966@param[in] hash hash chain the lock on
967@param[in] block block containing the record
968@param[in] heap_no heap number of the record
969@return first lock, NULL if none exists */
971 const buf_block_t *block,
972 ulint heap_no);
974/** Gets the mode of a lock.
975 @return mode */
976static inline enum lock_mode lock_get_mode(const lock_t *lock); /*!< in: lock */
978/** Calculates if lock mode 1 is compatible with lock mode 2.
979@param[in] mode1 lock mode
980@param[in] mode2 lock mode
981@return nonzero if mode1 compatible with mode2 */
982static inline ulint lock_mode_compatible(enum lock_mode mode1,
983 enum lock_mode mode2);
985/** Calculates if lock mode 1 is stronger or equal to lock mode 2.
986@param[in] mode1 lock mode 1
987@param[in] mode2 lock mode 2
988@return true iff mode1 stronger or equal to mode2 */
989static inline bool lock_mode_stronger_or_eq(enum lock_mode mode1,
990 enum lock_mode mode2);
992/** Gets the wait flag of a lock.
993 @return LOCK_WAIT if waiting, 0 if not */
994static inline ulint lock_get_wait(const lock_t *lock); /*!< in: lock */
996/** Checks if a transaction has the specified table lock, or stronger. This
997function should only be called by the thread that owns the transaction.
998This function acquires trx->mutex which protects trx->lock.trx_locks, but you
999should understand that this only makes it easier to argue against races at the
1000level of access to the data structure, yet does not buy us any protection at
1001the higher level of making actual decisions based on the result of this call -
1002it may happen that another thread is removing a table lock,
1003and even though lock_table_has returned true to the caller, the lock is no
1004longer in possession of trx once the caller gets to evaluate if/else condition
1005based on the result.
1006Therefore it is up to caller to make sure that the context of the call to this
1007function and making any decisions based on the result is protected from any
1008concurrent modifications. This in turn makes the whole trx_mutex_enter/exit
1009a bit redundant, but it does not affect performance yet makes the reasoning
1010about data structure a bit easier and protects trx->lock.trx_locks data
1011structure from corruption in case our high level reasoning about absence of
1012parallel modifications turns out wrong.
1013@param[in] trx transaction
1014@param[in] table table
1015@param[in] mode lock mode
1016@return true iff trx holds a lock on the table of the given mode, or stronger
1017static inline bool lock_table_has(const trx_t *trx, const dict_table_t *table,
1018 enum lock_mode mode);
1020/** Handles writing the information about found deadlock to the log files
1021and caches it for future lock_latest_err_file() calls (for example used by
1023@param[in] trxs_on_cycle trxs causing deadlock, i-th waits for i+1-th
1024@param[in] victim_trx the trx from trx_on_cycle which will be rolled back */
1026 const trx_t *victim_trx);
1028#include "lock0priv.ic"
1030/** Iterate over record locks matching <space, page_no, heap_no> */
1032 /* First is the previous lock, and second is the current lock. */
1033 /** Gets the next record lock on a page.
1034 @param[in] rec_id The record ID
1035 @param[in] lock The current lock
1036 @return matching lock or nullptr if end of list */
1037 static lock_t *advance(const RecID &rec_id, lock_t *lock) {
1039 ut_ad(lock->is_record_lock());
1041 while ((lock = static_cast<lock_t *>(lock->hash)) != nullptr) {
1042 ut_ad(lock->is_record_lock());
1044 if (rec_id.matches(lock)) {
1045 return (lock);
1046 }
1047 }
1049 ut_ad(lock == nullptr);
1050 return (nullptr);
1051 }
1053 /** Gets the first explicit lock request on a record.
1054 @param[in] list Record hash
1055 @param[in] rec_id Record ID
1056 @return first lock, nullptr if none exists */
1057 static lock_t *first(hash_cell_t *list, const RecID &rec_id) {
1060 auto lock = static_cast<lock_t *>(list->node);
1062 ut_ad(lock == nullptr || lock->is_record_lock());
1064 if (lock != nullptr && !rec_id.matches(lock)) {
1065 lock = advance(rec_id, lock);
1066 }
1068 return (lock);
1069 }
1071 /** Iterate over all the locks on a specific row
1072 @param[in] rec_id Iterate over locks on this row
1073 @param[in] f Function to call for each entry
1074 @param[in] hash_table The hash table to iterate over
1075 @return lock where the callback returned false */
1076 template <typename F>
1077 static const lock_t *for_each(const RecID &rec_id, F &&f,
1078 hash_table_t *hash_table = lock_sys->rec_hash) {
1081 auto list = hash_get_nth_cell(
1082 hash_table, hash_calc_cell_id(rec_id.m_hash_value, hash_table));
1084 for (auto lock = first(list, rec_id); lock != nullptr;
1085 lock = advance(rec_id, lock)) {
1086 ut_ad(lock->is_record_lock());
1088 if (!std::forward<F>(f)(lock)) {
1089 return (lock);
1090 }
1091 }
1093 return (nullptr);
1094 }
1097namespace locksys {
1099 public:
1101 static void exclusive_latch(ut::Location location) {
1103 }
1106/** Temporarily releases trx->mutex, latches the lock-sys shard containing
1107peeked_lock and latches trx->mutex again and calls f under protection of both
1108latches. The latch on lock-sys shard will be released immediately after f
1109returns. It is a responsibility of the caller to handle shared lock-sys latch,
1110trx->mutex and verify inside f that the trx has not been finished, and the lock
1111was not released meanwhile.
1112@param[in] peeked_lock A lock of the trx. (While trx->mutex is held it can't
1113 be freed, but can be released). It is used to
1114 determine the lock-sys shard to latch.
1115@param[in] f The callback to call once the lock-sys shard is
1116 latched and trx->mutex is relatched.
1117@return The value returned by f. */
1119template <typename F>
1120auto latch_peeked_shard_and_do(const lock_t *peeked_lock, F &&f) {
1122 const trx_t *trx = peeked_lock->trx;
1123 ut_ad(trx_mutex_own(trx));
1124 ut_ad(peeked_lock->trx == trx);
1125 /* peeked_wait_lock points to a lock struct which will not be freed while we
1126 hold trx->mutex. Thus it is safe to inspect the peeked_wait_lock's
1127 rec_lock.page_id and tab_lock.table. We have to make a copy of them, though,
1128 before releasing trx->mutex. */
1129 if (peeked_lock->is_record_lock()) {
1130 const auto sharded_by = peeked_lock->rec_lock.page_id;
1131 trx_mutex_exit(trx);
1132 DEBUG_SYNC_C("try_relatch_trx_and_shard_and_do_noted_expected_version");
1135 return std::forward<F>(f)();
1136 } else {
1137 /*Once we release the trx->mutex, the trx may release locks on table and
1138 commit, which in extreme case could lead to freeing the dict_table_t
1139 object, so we have to copy its id first. */
1140 const auto sharded_by = peeked_lock->tab_lock.table->id;
1141 trx_mutex_exit(trx);
1144 return std::forward<F>(f)();
1145 }
1148/** Given a pointer to trx (which the caller guarantees will not be freed) and
1149the expected value of trx->version, will call the provided function f, only if
1150the trx is still in expected version and waiting for a lock, within a critical
1151section which holds latches on the trx, and the shard containing the waiting
1152lock. If the transaction has meanwhile finished waiting for a lock, or committed
1153or rolled back etc. the f will not be called.
1154It may happen that the lock for which the trx is waiting during execution of f
1155is not the same as the lock it was waiting at the moment of invocation.
1156@param[in] trx_version The version of the trx that we intend to wake up
1157@param[in] f The callback to call if trx is still waiting for a
1158 lock and is still in version trx_version */
1160template <typename F>
1161void run_if_waiting(const TrxVersion trx_version, F &&f) {
1162 const trx_t *trx = trx_version.m_trx;
1163 /* This code would be much simpler with Global_exclusive_latch_guard.
1164 Unfortunately, this lead to long semaphore waits when thousands of
1165 transactions were taking thousands of locks and timing out. Therefore we use
1166 the following tricky code to instead only latch the single shard which
1167 contains the trx->lock.wait_lock. This is a bit difficult, because during
1168 B-tree reorganization a record lock might be removed from one page and moved
1169 to another, temporarily setting wait_lock to nullptr. This should be very
1170 rare and short. In most cases this while loop should do just one iteration
1171 and proceed along a happy path through all ifs. Another reason wait_lock
1172 might become nullptr is because we were granted the lock meanwhile, in which
1173 case the trx->lock.blocking_trx is first set to nullptr */
1174 do {
1175 if (!trx->lock.wait_lock.load()) {
1176 continue;
1177 }
1179 /* We can't use IB_mutex_guard with trx->mutex, as trx_mutex_enter has
1180 custom logic. We want to release trx->mutex before ut_delay or return. */
1181 trx_mutex_enter(trx);
1182 auto guard = create_scope_guard([trx]() { trx_mutex_exit(trx); });
1183 if (trx->version != trx_version.m_version) {
1184 return;
1185 }
1186 if (const lock_t *peeked_wait_lock = trx->lock.wait_lock.load()) {
1187 const bool retry = latch_peeked_shard_and_do(peeked_wait_lock, [&]() {
1188 ut_ad(trx_mutex_own(trx));
1189 if (trx->version != trx_version.m_version) {
1190 return false;
1191 }
1192 if (peeked_wait_lock != trx->lock.wait_lock.load()) {
1193 /* If wait_lock has changed, then in case of record lock it might have
1194 been moved during B-tree reorganization, so we retry. In case of a
1195 table lock the wait_lock can not be "moved" so it had to be released
1196 permanently and there's no point in retrying.*/
1197 return peeked_wait_lock->is_record_lock();
1198 }
1199 std::forward<F>(f)();
1200 ut_ad(trx_mutex_own(trx));
1201 return false;
1202 });
1203 if (!retry) {
1204 return;
1205 }
1206 }
1207 /* wait_lock appears to be null. If blocking_trx isn't nullptr, then
1208 probably the wait_lock will soon be restored, otherwise we can give up */
1209 } while (trx->lock.blocking_trx.load() && ut_delay(10));
1211} // namespace locksys
1213#endif /* lock0priv_h */
static mysql_service_status_t init()
Component initialization.
void btr_assert_not_corrupted(const buf_block_t *block, const dict_index_t *index)
Assert that a B-tree page is not corrupted.
Definition: btr0btr.h:154
Create record locks.
Definition: lock0priv.h:657
ulint m_mode
Lock mode requested.
Definition: lock0priv.h:862
RecLock(dict_index_t *index, const buf_block_t *block, ulint heap_no, ulint mode)
Definition: lock0priv.h:711
RecID m_rec_id
The record lock tuple {space, page_no, heap_no}.
Definition: lock0priv.h:874
RecLock(que_thr_t *thr, dict_index_t *index, const RecID &rec_id, ulint mode)
Definition: lock0priv.h:665
static bool is_predicate_lock(ulint mode)
Definition: lock0priv.h:848
size_t lock_size() const
Definition: lock0priv.h:760
static size_t lock_size(const page_t *page)
Calculate the record lock physical size required, non-predicate lock.
Definition: lock0priv.h:837
RecLock(que_thr_t *thr, dict_index_t *index, const buf_block_t *block, ulint heap_no, ulint mode)
Definition: lock0priv.h:683
static size_t lock_size(ulint mode)
Calculate the record lock physical size required for a predicate lock.
Definition: lock0priv.h:802
size_t m_size
Size of the record lock in bytes.
Definition: lock0priv.h:866
trx_t * m_trx
Transaction requesting the record lock.
Definition: lock0priv.h:858
void init(const page_t *page)
Setup the context from the requirements.
Definition: lock0priv.h:779
que_thr_t * m_thr
The query thread of the transaction.
Definition: lock0priv.h:854
dict_index_t * m_index
Index on which the record lock is required.
Definition: lock0priv.h:870
RecLock(dict_index_t *index, const RecID &rec_id, ulint mode)
Definition: lock0priv.h:699
Definition: hash0hash.h:374
A RAII helper which latches global_latch in shared mode during constructor, and unlatches it during d...
Definition: lock0guards.h:71
void x_lock(ut::Location location)
Definition: lock0latches.h:141
void x_unlock()
Definition: lock0latches.h:142
Unique_sharded_rw_lock global_latch
Definition: lock0latches.h:254
A RAII helper which latches the mutex protecting given shard during constructor, and unlatches it dur...
Definition: lock0guards.h:91
Definition: lock0priv.h:1098
static void exclusive_latch(ut::Location location)
Definition: lock0priv.h:1101
static void exclusive_unlatch()
Definition: lock0priv.h:1100
Page identifier.
Definition: buf0types.h:207
int page
Definition: db0err.h:39
static bool dict_index_is_online_ddl(const dict_index_t *index)
Determines if a secondary index is being or has been created online, or if the table is being rebuilt...
Data dictionary global types.
#define DEBUG_SYNC_C(_sync_point_name_)
Definition: my_sys.h:214
The simple hash table utility.
static hash_cell_t * hash_get_nth_cell(hash_table_t *table, size_t n)
Gets the nth cell in a hash table.
static uint64_t hash_calc_cell_id(uint64_t hash_value, hash_table_t *table)
Calculates the cell index from a hashed value for a specified hash table.
#define UINT32_MAX
constexpr uint32_t LOCK_PRDT_PAGE
Page lock.
Definition: lock0lock.h:1001
constexpr uint32_t LOCK_MODE_MASK
Lock modes and types.
Definition: lock0lock.h:963
constexpr uint32_t LOCK_PREDICATE
Predicate lock.
Definition: lock0lock.h:999
constexpr uint32_t LOCK_WAIT
Waiting lock flag; when set, it means that the lock has not yet been granted, it is just waiting for ...
Definition: lock0lock.h:976
static uint64_t lock_rec_hash_value(const page_id_t &page_id)
Calculates the hash value of a page file address: used in inserting or searching for a lock in the ha...
constexpr uint32_t LOCK_ORDINARY
this flag denotes an ordinary next-key lock in contrast to LOCK_GAP or LOCK_REC_NOT_GAP
Definition: lock0lock.h:980
constexpr uint32_t LOCK_TYPE_MASK
mask used to extract lock type from the type_mode field in a lock
Definition: lock0lock.h:970
constexpr uint32_t LOCK_INSERT_INTENTION
this bit is set when we place a waiting gap type record lock request in order to let an insert of an ...
Definition: lock0lock.h:997
constexpr uint32_t LOCK_TABLE
Lock types.
Definition: lock0lock.h:966
lock_sys_t * lock_sys
The lock system.
static hash_table_t * lock_hash_get(ulint mode)
Get the lock hash table.
constexpr uint32_t LOCK_GAP
when this bit is set, it means that the lock holds only on the gap before the record; for instance,...
Definition: lock0lock.h:985
constexpr uint32_t LOCK_REC_NOT_GAP
this bit means that the lock is only on the index record and does NOT block inserts to the gap before...
Definition: lock0lock.h:991
constexpr uint32_t LOCK_REC
record lock
Definition: lock0lock.h:968
struct lock_prdt lock_prdt_t
static ulint lock_mode_compatible(enum lock_mode mode1, enum lock_mode mode2)
Calculates if lock mode 1 is compatible with lock mode 2.
static lock_t * lock_rec_get_first_on_page(hash_table_t *lock_hash, const buf_block_t *block)
Gets the first record lock on a page, where the page is identified by a pointer to it.
static const byte lock_strength_matrix[5][5]
Definition: lock0priv.h:577
static bool lock_mode_is_next_key_lock(ulint mode)
Checks if the mode is LOCK_S or LOCK_X (possibly ORed with LOCK_WAIT or LOCK_REC) which means the loc...
Definition: lock0priv.h:118
constexpr uint32_t MAX_STACK_SIZE
Maximum depth of the DFS stack.
Definition: lock0priv.h:586
void lock_reset_wait_and_release_thread_if_suspended(lock_t *lock)
This function is a wrapper around several functions which need to be called in particular order to wa...
static const ulint lock_types
The count of the types of locks.
Definition: lock0priv.h:879
static const byte lock_compatibility_matrix[5][5]
Definition: lock0priv.h:560
constexpr uint32_t PRDT_HEAPNO
Definition: lock0priv.h:588
static bool lock_mode_stronger_or_eq(enum lock_mode mode1, enum lock_mode mode2)
Calculates if lock mode 1 is stronger or equal to lock mode 2.
void lock_notify_about_deadlock(const ut::vector< const trx_t * > &trxs_on_cycle, const trx_t *victim_trx)
Handles writing the information about found deadlock to the log files and caches it for future lock_l...
static const lock_t * lock_rec_get_next_const(ulint heap_no, const lock_t *lock)
Gets the next explicit lock request on a record.
static const ulint LOCK_PAGE_BITMAP_MARGIN
Definition: lock0priv.h:309
static const lock_t * lock_rec_get_next_on_page_const(const lock_t *lock)
Gets the first or next record lock on a page.
static lock_t * lock_rec_get_next_on_page(lock_t *lock)
Gets the first or next record lock on a page.
static enum lock_mode lock_get_mode(const lock_t *lock)
Gets the mode of a lock.
std::ostream & operator<<(std::ostream &out, const lock_table_t &lock)
The global output operator is overloaded to conveniently print the lock_table_t object into the given...
Definition: lock0priv.h:78
static ulint lock_get_wait(const lock_t *lock)
Gets the wait flag of a lock.
bool lock_print_waits
static lock_t * lock_rec_get_first(hash_table_t *hash, const buf_block_t *block, ulint heap_no)
Gets the first explicit lock request on a record.
static trx_id_t lock_clust_rec_some_has_impl(const rec_t *rec, const dict_index_t *index, const ulint *offsets)
Checks if some transaction has an implicit x-lock on a record in a clustered index.
static bool lock_rec_get_nth_bit(const lock_t *lock, ulint i)
Gets the nth bit of a record lock.
static lock_t * lock_rec_get_first_on_page_addr(hash_table_t *lock_hash, const page_id_t &page_id)
Gets the first record lock on a page, where the page is identified by its file address.
void lock_cancel_waiting_and_release(trx_t *trx)
Cancels a waiting lock request and releases possible other transactions waiting behind it.
static uint32_t lock_get_type_low(const lock_t *lock)
Gets the type of a lock.
static bool lock_table_has(const trx_t *trx, const dict_table_t *table, enum lock_mode mode)
Checks if a transaction has the specified table lock, or stronger.
Record locking request status.
Definition: lock0priv.h:590
Failed to acquire a lock.
Definition: lock0priv.h:592
Succeeded in acquiring a lock (implicit or already acquired)
Definition: lock0priv.h:594
Explicitly created a new lock.
Definition: lock0priv.h:596
static void lock_rec_set_nth_bit(lock_t *lock, ulint i)
Sets the nth bit of a record lock to true.
static lock_t * lock_rec_get_next(ulint heap_no, lock_t *lock)
Gets the next explicit lock request on a record.
static uint32_t lock_rec_get_n_bits(const lock_t *lock)
Gets the number of bits in a record lock bitmap.
const lock_t * lock_rec_get_prev(const lock_t *in_lock, ulint heap_no)
Gets the previous record lock set on a record.
Lock module internal inline methods.
const char * lock_mode_string(enum lock_mode mode)
Convert the given enum value into string.
Definition: lock0types.h:67
Definition: lock0types.h:52
Definition: lock0types.h:55
Definition: lock0types.h:56
unsigned long long int ulonglong
Definition: my_inttypes.h:56
Provides atomic access in shared-exclusive modes.
Definition: shared_spin_lock.h:79
Definition: lock0guards.h:34
bool owns_page_shard(const page_id_t &page_id)
Tests if given page shard can be safely accessed by the current thread.
auto latch_peeked_shard_and_do(const lock_t *peeked_lock, F &&f)
Temporarily releases trx->mutex, latches the lock-sys shard containing peeked_lock and latches trx->m...
Definition: lock0priv.h:1120
void run_if_waiting(const TrxVersion trx_version, F &&f)
Given a pointer to trx (which the caller guarantees will not be freed) and the expected value of trx-...
Definition: lock0priv.h:1161
bool owns_shared_global_latch()
Tests if lock_sys latch is owned in shared mode by the current thread.
size_t size(const char *const c)
Definition: base64.h:46
static mysql_service_status_t create(const char *service_names[], reference_caching_channel *out_channel) noexcept
Definition: gcs_xcom_synode.h:64
Definition: file_handle.h:61
bool wait_for(TCondition cond, std::chrono::steady_clock::duration max_wait)
Delays execution for at most max_wait or returns earlier if cond becomes true.
Definition: ut0ut.ic:131
std::basic_ostringstream< char, std::char_traits< char >, ut::allocator< char > > ostringstream
Specialization of basic_ostringstream which uses ut::allocator.
Definition: ut0new.h:2870
std::vector< T, ut::allocator< T > > vector
Specialization of vector which uses allocator.
Definition: ut0new.h:2874
std::list< T, ut::allocator< T > > list
Specialization of list which uses ut_allocator.
Definition: ut0new.h:2878
static uint16_t page_dir_get_n_heap(const page_t *page)
Gets the number of records in the heap.
constexpr ulint PAGE_HEAP_NO_INFIMUM
Page infimum.
Definition: page0types.h:131
constexpr ulint PAGE_HEAP_NO_SUPREMUM
Page supremum.
Definition: page0types.h:133
byte page_t
Type of the index page.
Definition: page0types.h:152
static trx_t * thr_get_trx(que_thr_t *thr)
Gets the trx of a query thread.
byte rec_t
Definition: rem0types.h:41
required string type
Definition: replication_group_member_actions.proto:34
Scope_guard< TLambda > create_scope_guard(const TLambda rollback_lambda)
Definition: scope_guard.h:61
bool srv_read_only_mode
Set if InnoDB must operate in read-only mode.
Iterate over record locks matching <space, page_no, heap_no>
Definition: lock0priv.h:1031
static lock_t * advance(const RecID &rec_id, lock_t *lock)
Gets the next record lock on a page.
Definition: lock0priv.h:1037
static lock_t * first(hash_cell_t *list, const RecID &rec_id)
Gets the first explicit lock request on a record.
Definition: lock0priv.h:1057
static const lock_t * for_each(const RecID &rec_id, F &&f, hash_table_t *hash_table=lock_sys->rec_hash)
Iterate over all the locks on a specific row.
Definition: lock0priv.h:1077
Record lock ID.
Definition: lock0priv.h:601
bool is_supremum() const
Definition: lock0priv.h:633
uint64_t m_hash_value
Hash generated from record's location which will be used to get lock queue for this record.
Definition: lock0priv.h:652
uint64_t hash_value() const
Definition: lock0priv.h:630
const page_id_t & get_page_id() const
Definition: lock0priv.h:640
RecID(page_id_t page_id, uint32_t heap_no)
Definition: lock0priv.h:613
RecID(const buf_block_t *block, ulint heap_no)
Definition: lock0priv.h:625
uint32_t m_heap_no
Heap number within the page.
Definition: lock0priv.h:647
bool matches(const lock_t *lock) const
Definition: lock0priv.ic:296
page_id_t m_page_id
Tablespace ID and page number within space
Definition: lock0priv.h:643
RecID(const lock_t *lock, ulint heap_no)
Definition: lock0priv.h:605
Definition: trx0types.h:598
uint64_t m_version
Definition: trx0types.h:602
trx_t * m_trx
Definition: trx0types.h:601
The buffer control block structure.
Definition: buf0buf.h:1747
byte * frame
pointer to buffer frame which is of size UNIV_PAGE_SIZE, and aligned to an address divisible by UNIV_...
Definition: buf0buf.h:1769
Data structure for an index.
Definition: dict0mem.h:1046
Data structure for a database table.
Definition: dict0mem.h:1909
table_id_t id
Id of the table.
Definition: dict0mem.h:1970
table_name_t name
Table name.
Definition: dict0mem.h:1984
Definition: hash0hash.h:61
Definition: lock0prdt.h:40
Record lock for a page.
Definition: lock0priv.h:83
std::ostream & print(std::ostream &out) const
Print the record lock into the given output stream.
Definition: lock0priv.h:101
uint32_t n_bits
number of bits in the lock bitmap; Must be divisible by 8.
Definition: lock0priv.h:90
page_id_t page_id
The id of the page on which records referenced by this lock's bitmap are located.
Definition: lock0priv.h:86
locksys::Latches latches
The latches protecting queues of record and table locks.
Definition: lock0lock.h:1022
hash_table_t * rec_hash
The hash table of the record (LOCK_REC) locks, except for predicate (LOCK_PREDICATE) and predicate pa...
Definition: lock0lock.h:1026
Lock struct; protected by lock_sys latches.
Definition: lock0priv.h:136
bool is_record_not_gap() const
Definition: lock0priv.h:203
dict_index_t * index
Index for a record lock.
Definition: lock0priv.h:144
lock_table_t tab_lock
Table lock.
Definition: lock0priv.h:152
lock_mode mode() const
Definition: lock0priv.h:224
const char * type_string() const
Definition: lock0priv.h:245
lock_rec_t rec_lock
Record lock.
Definition: lock0priv.h:155
bool includes_supremum() const
Definition: lock0priv.h:216
bool is_gap() const
Definition: lock0priv.h:200
ulonglong m_psi_internal_thread_id
Performance schema thread that created the lock.
Definition: lock0priv.h:161
bool is_insert_intention() const
Definition: lock0priv.h:211
bool is_record_lock() const
Determine if the lock object is a record lock.
Definition: lock0priv.h:188
trx_que_t trx_que_state() const
Definition: lock0priv.h:233
std::ostream & print(std::ostream &out) const
Print the lock object into the given output stream.
Definition: lock0priv.h:284
bool is_waiting() const
Definition: lock0priv.h:197
lock_t * hash
Hash chain node for a record lock.
Definition: lock0priv.h:148
ulonglong m_psi_event_id
Performance schema event that created the lock.
Definition: lock0priv.h:164
uint32_t type_mode
The lock type and mode bit flags.
Definition: lock0priv.h:170
trx_t * trx
transaction owning the lock
Definition: lock0priv.h:138
uint64_t m_seq
Timestamp when it was created.
Definition: lock0priv.h:174
bool is_next_key_lock() const
Definition: lock0priv.h:206
bool is_predicate() const
Determine if it is predicate lock.
Definition: lock0priv.h:192
uint32_t type() const
Definition: lock0priv.h:221
UT_LIST_NODE_T(lock_t) trx_locks
list of the locks of the transaction
void unlock_gap_lock()
Unlock the GAP Lock part of this Next Key Lock.
Definition: lock0priv.h:178
hash_table_t * hash_table() const
Get lock hash table.
Definition: lock0priv.h:230
A table lock.
Definition: lock0priv.h:53
std::ostream & print(std::ostream &out) const
Print the table lock into the given output stream.
Definition: lock0priv.h:68
dict_table_t * table
database table in dictionary cache
Definition: lock0priv.h:54
list of locks on the same table
Definition: lock0priv.h:57
Definition: que0que.h:242
std::atomic< trx_t * > blocking_trx
If this transaction is waiting for a lock, then blocking_trx points to a transaction which holds a co...
Definition: trx0trx.h:448
std::atomic< lock_t * > wait_lock
The lock request of this transaction is waiting for.
Definition: trx0trx.h:469
trx_que_t que_state
valid when trx->state == TRX_STATE_ACTIVE: TRX_QUE_RUNNING, TRX_QUE_LOCK_WAIT, ...
Definition: trx0trx.h:416
Definition: trx0trx.h:684
std::atomic_uint64_t version
Version of this instance.
Definition: trx0trx.h:1083
trx_lock_t lock
Information about the transaction locks and state.
Definition: trx0trx.h:831
Definition: ut0core.h:36
#define trx_mutex_enter_first_of_two(t)
Acquire the trx->mutex (and indicate we might request one more).
Definition: trx0trx.h:1394
#define trx_mutex_exit(t)
Release the trx->mutex.
Definition: trx0trx.h:1397
#define trx_mutex_enter(t)
Acquire the trx->mutex (and promise not to request any more).
Definition: trx0trx.h:1391
bool trx_mutex_own(const trx_t *trx)
Test if trx->mutex is owned by the current thread.
Definition: trx0trx.h:1354
Transaction system global type definitions.
Transaction execution states when trx->state == TRX_STATE_ACTIVE.
Definition: trx0types.h:71
ib_id_t trx_id_t
Transaction identifier (DB_TRX_ID, DATA_TRX_ID)
Definition: trx0types.h:138
Version control for database, common definitions, and include files.
constexpr size_t UNIV_WORD_SIZE
MySQL config.h generated by CMake will define SIZEOF_LONG in Posix.
Definition: univ.i:278
unsigned long int ulint
Definition: univ.i:406
#define UT_ARR_SIZE(a)
Definition: univ.i:524
Definition: ut0core.h:73
#define ut_error
Abort execution.
Definition: ut0dbg.h:101
#define ut_ad(EXPR)
Debug assertion.
Definition: ut0dbg.h:105
A helper for the UT_LIST_BASE_NODE_T_EXTERN which declares a node getter struct which extracts member...
Definition: ut0lst.h:270
#define UT_LIST_NODE_T(t)
Macro used for legacy reasons.
Definition: ut0lst.h:64
ulint ut_delay(ulint delay)
Runs an idle loop on CPU.
static task_env * retry
static void prepare(pax_msg *p, pax_op op)