MySQL 9.1.0
Source Code Documentation
Query execution.
#include "sql/sql_executor.h"
#include <algorithm>
#include <atomic>
#include <bit>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "field_types.h"
#include "mem_root_deque.h"
#include "my_alloc.h"
#include "my_base.h"
#include "my_bitmap.h"
#include "my_byteorder.h"
#include "my_checksum.h"
#include "my_dbug.h"
#include "my_hash_combine.h"
#include "my_sqlcommand.h"
#include "my_sys.h"
#include "my_table_map.h"
#include "mysql/components/services/bits/psi_bits.h"
#include "mysql/components/services/log_builtins.h"
#include "mysql/my_loglevel.h"
#include "mysql/strings/m_ctype.h"
#include "mysqld_error.h"
#include "prealloced_array.h"
#include "sql-common/json_dom.h"
#include "sql/current_thd.h"
#include "sql/field.h"
#include "sql/filesort.h"
#include "sql/handler.h"
#include "sql/item.h"
#include "sql/item_cmpfunc.h"
#include "sql/item_func.h"
#include "sql/item_sum.h"
#include "sql/iterators/basic_row_iterators.h"
#include "sql/iterators/row_iterator.h"
#include "sql/iterators/timing_iterator.h"
#include "sql/join_optimizer/access_path.h"
#include "sql/join_optimizer/bit_utils.h"
#include "sql/join_optimizer/cost_model.h"
#include "sql/join_optimizer/join_optimizer.h"
#include "sql/join_optimizer/materialize_path_parameters.h"
#include "sql/join_optimizer/relational_expression.h"
#include "sql/join_optimizer/replace_item.h"
#include "sql/join_optimizer/walk_access_paths.h"
#include "sql/join_type.h"
#include "sql/key.h"
#include "sql/mem_root_array.h"
#include "sql/mysqld.h"
#include "sql/nested_join.h"
#include "sql/opt_costmodel.h"
#include "sql/opt_explain_format.h"
#include "sql/opt_trace.h"
#include "sql/query_options.h"
#include "sql/query_term.h"
#include "sql/record_buffer.h"
#include "sql/sort_param.h"
#include "sql/sql_array.h"
#include "sql/sql_class.h"
#include "sql/sql_cmd.h"
#include "sql/sql_const.h"
#include "sql/sql_delete.h"
#include "sql/sql_list.h"
#include "sql/sql_optimizer.h"
#include "sql/sql_resolver.h"
#include "sql/sql_select.h"
#include "sql/sql_sort.h"
#include "sql/sql_tmp_table.h"
#include "sql/sql_update.h"
#include "sql/table.h"
#include "sql/temp_table_param.h"
#include "sql/visible_fields.h"
#include "sql/window.h"
#include "template_utils.h"
#include "thr_lock.h"
Enumerations
enum class | Substructure { NONE, OUTER_JOIN, SEMIJOIN, WEEDOUT } |
Functions
static int | read_system (TABLE *table) |
Read a constant table when there is at most one matching row, using a table scan. More... | |
static bool | alloc_group_fields (JOIN *join, ORDER *group) |
Get a list of buffers for saving last group. More... | |
string | RefToString (const Index_lookup &ref, const KEY &key, bool include_nulls) |
static const char * | cft_name (Copy_func_type type) |
bool | has_rollup_result (Item *item) |
Checks if an item has a ROLLUP NULL which needs to be written to temp table. More... | |
bool | is_rollup_group_wrapper (const Item *item) |
Item * | unwrap_rollup_group (Item *item) |
bool | prepare_sum_aggregators (Item_sum **sum_funcs, bool need_distinct) |
bool | setup_sum_funcs (THD *thd, Item_sum **func_ptr) |
Call setup() for all sum functions. More... | |
void | init_tmptable_sum_functions (Item_sum **func_ptr) |
void | update_tmptable_sum_func (Item_sum **func_ptr, TABLE *tmp_table) |
Update record 0 in tmp_table from record 1. More... | |
bool | copy_funcs (Temp_table_param *param, const THD *thd, Copy_func_type type) |
Copy result of functions to record in tmp_table. More... | |
static bool | update_const_equal_items (THD *thd, Item *cond, JOIN_TAB *tab) |
Check appearance of new constant items in multiple equalities of a condition after reading a constant table. More... | |
void | setup_tmptable_write_func (QEP_TAB *tab, Opt_trace_object *trace) |
Setup write_func of QEP_tmp_table object. More... | |
static size_t | record_prefix_size (const TABLE *table) |
Find out how many bytes it takes to store the smallest prefix which covers all the columns that will be read from a table. More... | |
bool | set_record_buffer (TABLE *table, double expected_rows_to_fetch) |
Allocate a data buffer that the storage engine can use for fetching batches of records. More... | |
bool | ExtractConditions (Item *condition, Mem_root_array< Item * > *condition_parts) |
Split AND conditions into their constituent parts, recursively. More... | |
static bool | ContainsAnyMRRPaths (AccessPath *path) |
See if “path” has any MRR nodes; if so, we cannot optimize them away in PossiblyAttachFilter(), as the BKA iterator expects there to be a corresponding MRR iterator. More... | |
Item * | CreateConjunction (List< Item > *items) |
Create an AND conjunction of all given items. More... | |
AccessPath * | PossiblyAttachFilter (AccessPath *path, const vector< Item * > &conditions, THD *thd, table_map *conditions_depend_on_outer_tables) |
Return a new access path that wraps "path" and tests all of the given conditions (if any), ANDed together. More... | |
AccessPath * | CreateNestedLoopAccessPath (THD *thd, AccessPath *outer, AccessPath *inner, JoinType join_type, bool pfs_batch_mode) |
static AccessPath * | NewInvalidatorAccessPathForTable (THD *thd, AccessPath *path, QEP_TAB *qep_tab, plan_idx table_index_to_invalidate) |
static table_map | ConvertQepTabMapToTableMap (JOIN *join, qep_tab_map tables) |
AccessPath * | CreateBKAAccessPath (THD *thd, JOIN *join, AccessPath *outer_path, qep_tab_map left_tables, AccessPath *inner_path, qep_tab_map right_tables, TABLE *table, Table_ref *table_list, Index_lookup *ref, JoinType join_type) |
static AccessPath * | PossiblyAttachFilter (AccessPath *path, const vector< PendingCondition > &conditions, THD *thd, table_map *conditions_depend_on_outer_tables) |
static Item_func_trig_cond * | GetTriggerCondOrNull (Item *item) |
void | ConvertItemsToCopy (const mem_root_deque< Item * > &items, Field **fields, Temp_table_param *param) |
For historical reasons, derived table materialization and temporary table materialization didn't specify the fields to materialize in the same way. More... | |
static bool | IsJoinCondition (const Item *item, const QEP_TAB *qep_tab) |
static Item * | GetInnermostCondition (Item *item) |
static bool | CheckIfFieldsAvailableForCond (Item *item, table_map build_tables, table_map probe_tables) |
static void | AttachSemiJoinCondition (Item *join_cond, vector< PendingCondition > *join_conditions, QEP_TAB *current_table, qep_tab_map left_tables, plan_idx semi_join_table_idx) |
void | SplitConditions (Item *condition, QEP_TAB *current_table, vector< Item * > *predicates_below_join, vector< PendingCondition > *predicates_above_join, vector< PendingCondition > *join_conditions, plan_idx semi_join_table_idx, qep_tab_map left_tables) |
static void | MarkUnhandledDuplicates (SJ_TMP_TABLE *weedout, plan_idx weedout_start, plan_idx weedout_end, qep_tab_map *unhandled_duplicates) |
For a given duplicate weedout operation, figure out which tables are supposed to be deduplicated by it, and add those to unhandled_duplicates. More... | |
static AccessPath * | CreateWeedoutOrLimitAccessPath (THD *thd, AccessPath *path, SJ_TMP_TABLE *weedout_table) |
static AccessPath * | NewWeedoutAccessPathForTables (THD *thd, const qep_tab_map tables_to_deduplicate, QEP_TAB *qep_tabs, uint primary_tables, AccessPath *path) |
static Substructure | FindSubstructure (QEP_TAB *qep_tabs, const plan_idx first_idx, const plan_idx this_idx, const plan_idx last_idx, CallingContext calling_context, bool *add_limit_1, plan_idx *substructure_end, qep_tab_map *unhandled_duplicates) |
Given a range of tables (where we assume that we've already handled first_idx..(this_idx-1) as inner joins), figure out whether this is a semijoin, an outer join or a weedout. More... | |
static bool | IsTableScan (AccessPath *path) |
static AccessPath * | GetAccessPathForDerivedTable (THD *thd, QEP_TAB *qep_tab, AccessPath *table_path) |
static void | RecalculateTablePathCost (THD *thd, AccessPath *path, const Query_block &outer_query_block) |
Recalculate the cost of 'path'. More... | |
AccessPath * | MoveCompositeIteratorsFromTablePath (THD *thd, AccessPath *path, const Query_block &outer_query_block) |
For a MATERIALIZE access path, move any non-basic iterators (e.g. More... | |
static AccessPath * | GetTablePathBottom (AccessPath *table_path) |
Find the bottom of 'table_path', i.e. More... | |
AccessPath * | GetAccessPathForDerivedTable (THD *thd, Table_ref *table_ref, TABLE *table, bool rematerialize, Mem_root_array< const AccessPath * > *invalidators, bool need_rowid, AccessPath *table_path) |
static AccessPath * | GetTableAccessPath (THD *thd, QEP_TAB *qep_tab, QEP_TAB *qep_tabs) |
Get the RowIterator used for scanning the given table, with any required materialization operations done first. More... | |
void | SetCostOnTableAccessPath (const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path) |
void | SetCostOnNestedLoopAccessPath (const Cost_model_server &cost_model, const POSITION *pos_inner, AccessPath *path) |
void | SetCostOnHashJoinAccessPath (const Cost_model_server &cost_model, const POSITION *pos_outer, AccessPath *path) |
static bool | ConditionIsAlwaysTrue (Item *item) |
static table_map | GetPrunedTables (const AccessPath *path) |
Find all the tables below "path" that have been pruned and replaced by a ZERO_ROWS access path. More... | |
static AccessPath * | CreateHashJoinAccessPath (THD *thd, QEP_TAB *qep_tab, AccessPath *build_path, qep_tab_map build_tables, AccessPath *probe_path, qep_tab_map probe_tables, JoinType join_type, vector< Item * > *join_conditions, table_map *conditions_depend_on_outer_tables) |
static void | ExtractJoinConditions (const QEP_TAB *current_table, vector< Item * > *predicates, vector< Item * > *join_conditions) |
static bool | UseHashJoin (QEP_TAB *qep_tab) |
static bool | UseBKA (QEP_TAB *qep_tab) |
static bool | QueryMixesOuterBKAAndBNL (JOIN *join) |
static bool | InsideOuterOrAntiJoin (QEP_TAB *qep_tab) |
void | PickOutConditionsForTableIndex (int table_idx, vector< PendingCondition > *from, vector< PendingCondition > *to) |
void | PickOutConditionsForTableIndex (int table_idx, vector< PendingCondition > *from, vector< Item * > *to) |
AccessPath * | FinishPendingOperations (THD *thd, AccessPath *path, QEP_TAB *remove_duplicates_loose_scan_qep_tab, const vector< PendingCondition > &pending_conditions, table_map *conditions_depend_on_outer_tables) |
AccessPath * | ConnectJoins (plan_idx upper_first_idx, plan_idx first_idx, plan_idx last_idx, QEP_TAB *qep_tabs, THD *thd, CallingContext calling_context, vector< PendingCondition > *pending_conditions, vector< PendingInvalidator > *pending_invalidators, vector< PendingCondition > *pending_join_conditions, qep_tab_map *unhandled_duplicates, table_map *conditions_depend_on_outer_tables) |
For a given slice of the table list, build up the iterator tree corresponding to the tables in that slice. More... | |
static table_map | get_update_or_delete_target_tables (const JOIN *join) |
static AccessPath * | add_filter_access_path (THD *thd, AccessPath *path, Item *condition, const Query_block *query_block) |
int | do_sj_dups_weedout (THD *thd, SJ_TMP_TABLE *sjtbl) |
SemiJoinDuplicateElimination: Weed out duplicate row combinations. More... | |
int | report_handler_error (TABLE *table, int error) |
Helper function for when we get an error from the table handler. More... | |
int | join_read_const_table (JOIN_TAB *tab, POSITION *pos) |
Reads content of constant table. More... | |
int | read_const (TABLE *table, Index_lookup *ref) |
static bool | cmp_field_value (Field *field, ptrdiff_t diff) |
static bool | group_rec_cmp (ORDER *group, uchar *rec0, uchar *rec1) |
Compare GROUP BY fields from the tmp table's record[0] and record[1]. More... | |
bool | table_rec_cmp (TABLE *table) |
Compare records from the tmp table's record[0] and record[1]. More... | |
ulonglong | calc_field_hash (const Field *field, ulonglong *hash_val) |
Generate hash for a field. More... | |
static ulonglong | unique_hash_group (ORDER *group) |
Generate hash for unique constraint according to group-by list. More... | |
ulonglong | calc_row_hash (TABLE *table) |
Generate hash for unique_constraint for all visible fields of a table. More... | |
bool | check_unique_fields (TABLE *table) |
Check whether a row is already present in the tmp table. More... | |
bool | construct_lookup (THD *thd, TABLE *table, Index_lookup *ref) |
Copy the lookup key into the table ref's key buffer. More... | |
bool | make_group_fields (JOIN *main_join, JOIN *curr_join) |
Allocate group fields or take prepared (cached) ones. More... | |
int | update_item_cache_if_changed (List< Cached_item > &list) |
size_t | compute_ria_idx (const mem_root_deque< Item * > &fields, size_t i, size_t added_non_hidden_fields, size_t border) |
Compute the position mapping from fields to ref_item_array, cf. More... | |
bool | copy_fields (Temp_table_param *param, const THD *thd, bool reverse_copy) |
Make a copy of all simple SELECT'ed fields. More... | |
static bool | replace_embedded_rollup_references_with_tmp_fields (THD *thd, Item *item, mem_root_deque< Item * > *fields) |
For each rollup wrapper below the given item, replace it with a temporary field, e.g. More... | |
bool | change_to_use_tmp_fields (mem_root_deque< Item * > *fields, THD *thd, Ref_item_array ref_item_array, mem_root_deque< Item * > *res_fields, size_t added_non_hidden_fields, bool windowing) |
Change all funcs and sum_funcs to fields in tmp table, and create new list of all items. More... | |
static Item_rollup_group_item * | find_rollup_item_in_group_list (Item *item, Query_block *query_block) |
bool | replace_contents_of_rollup_wrappers_with_tmp_fields (THD *thd, Query_block *select, Item *item_arg) |
For each rollup wrapper below the given item, replace its argument with a temporary field, e.g. More... | |
bool | change_to_use_tmp_fields_except_sums (mem_root_deque< Item * > *fields, THD *thd, Query_block *select, Ref_item_array ref_item_array, mem_root_deque< Item * > *res_fields, size_t added_non_hidden_fields) |
Change all sum_func refs to fields to point at fields in tmp table. More... | |
bool | MaterializeIsDoingDeduplication (TABLE *table) |
void | set_count_examined_rows (AccessPath *path, bool count_examined_rows) |
For the given access path, set "count_examined_rows" to the value specified. More... | |
AccessPath * | create_table_access_path (THD *thd, TABLE *table, AccessPath *range_scan, Table_ref *table_ref, POSITION *position, bool count_examined_rows) |
create_table_access_path() sets up a scan of a table using one of a number of different methods. More... | |
unique_ptr_destroy_only< RowIterator > | init_table_iterator (THD *thd, TABLE *table, AccessPath *range_scan, Table_ref *table_ref, POSITION *position, bool ignore_not_found_rows, bool count_examined_rows) |
Creates an iterator for the given table, then calls Init() on the resulting iterator. More... | |
Variables
static constexpr size_t | MIN_RECORD_BUFFER_SIZE = 4 * 1024 |
The minimum size of the record buffer allocated by set_record_buffer(). More... | |
static constexpr size_t | MAX_RECORD_BUFFER_SIZE = 128 * 1024 |
The maximum size of the record buffer allocated by set_record_buffer(). More... | |
static constexpr double | RECORD_BUFFER_FRACTION = 0.1f |
How big a fraction of the estimated number of returned rows to make room for in the record buffer allocated by set_record_buffer(). More... | |
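Taken together, these constants describe a record buffer that holds roughly RECORD_BUFFER_FRACTION of the estimated result rows, clamped to [MIN_RECORD_BUFFER_SIZE, MAX_RECORD_BUFFER_SIZE]. The sketch below only illustrates that relationship under the assumption of a hypothetical per-row size record_size; it is not the actual body of set_record_buffer().

    #include <algorithm>  // std::clamp
    #include <cstddef>    // size_t

    // Illustrative sketch only -- not the real set_record_buffer() implementation.
    // Uses the three constants documented above; record_size is an assumed
    // per-row byte count, expected_rows_to_fetch the optimizer's row estimate.
    static size_t suggested_record_buffer_size(size_t record_size,
                                               double expected_rows_to_fetch) {
      // Make room for a fraction of the expected rows ...
      const double wanted =
          RECORD_BUFFER_FRACTION * expected_rows_to_fetch * record_size;
      // ... but never less than the minimum or more than the maximum buffer size.
      return std::clamp(static_cast<size_t>(wanted), MIN_RECORD_BUFFER_SIZE,
                        MAX_RECORD_BUFFER_SIZE);
    }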
Query execution.
AccessPath *create_table_access_path(THD *thd, TABLE *table, AccessPath *range_scan, Table_ref *table_ref, POSITION *position, bool count_examined_rows)
create_table_access_path() sets up a scan of a table using one of a number of different methods.
The method to use is set up in this call, so that you can create an iterator from the returned access path and fetch rows through that iterator afterwards.
Parameters:
  thd                  Thread handle
  table                Table the data [originally] comes from
  range_scan           AccessPath to scan the table with, or nullptr
  table_ref            Position for the table, must be non-nullptr for WITH RECURSIVE
  position             Place to get cost information from, or nullptr
  count_examined_rows  See AccessPath::count_examined_rows.
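A minimal usage sketch of the pattern described above: build an access path for a plain table scan, then turn it into an iterator and read rows. It assumes a valid thd and an already opened table, and that CreateIteratorFromAccessPath() from sql/join_optimizer/access_path.h is the helper used to materialize the iterator; verify that helper and its exact signature against your source tree.

    // Sketch only; error handling is trimmed and `thd`/`table` are assumed to exist.
    AccessPath *path = create_table_access_path(
        thd, table, /*range_scan=*/nullptr, /*table_ref=*/nullptr,
        /*position=*/nullptr, /*count_examined_rows=*/false);

    // Materialize an executable iterator from the access path (helper assumed, see above).
    unique_ptr_destroy_only<RowIterator> iterator = CreateIteratorFromAccessPath(
        thd, path, /*join=*/nullptr, /*eligible_for_batch_mode=*/true);
    if (iterator == nullptr || iterator->Init()) return true;  // error

    while (iterator->Read() == 0) {
      // Each successful Read() leaves the current row in table->record[0].
    }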
unique_ptr_destroy_only<RowIterator> init_table_iterator(THD *thd, TABLE *table, AccessPath *range_scan, Table_ref *table_ref, POSITION *position, bool ignore_not_found_rows, bool count_examined_rows)
Creates an iterator for the given table, then calls Init() on the resulting iterator.
Unlike create_table_iterator(), this can create iterators for sort buffer results (which are set in the TABLE object during query execution). Returns nullptr on failure.
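A minimal sketch of calling init_table_iterator() directly for a full table scan, assuming thd and an opened table; all optional arguments are left out.

    // Sketch only. init_table_iterator() has already called Init() on the iterator,
    // so a nullptr return is the only initialization failure to check for.
    unique_ptr_destroy_only<RowIterator> it = init_table_iterator(
        thd, table, /*range_scan=*/nullptr, /*table_ref=*/nullptr,
        /*position=*/nullptr, /*ignore_not_found_rows=*/false,
        /*count_examined_rows=*/false);
    if (it == nullptr) return true;  // the error has already been reported

    int err;
    while ((err = it->Read()) == 0) {
      // Process the row now available in table->record[0].
    }
    // err == -1 means end of stream; a positive value indicates a read error.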
bool MaterializeIsDoingDeduplication(TABLE *table)
void set_count_examined_rows(AccessPath *path, bool count_examined_rows)
For the given access path, set "count_examined_rows" to the value specified.
For index merge scans, we set "count_examined_rows" for all the child paths too.
Parameters:
  path                 Access path (a range scan)
  count_examined_rows  See AccessPath::count_examined_rows.
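A short sketch of typical use, right after the range optimizer has produced a path; range_path is an assumed name for such a path.

    // Count rows read through this (range scan) path as examined rows; for an
    // index merge path, the flag is propagated to its child paths as described above.
    set_count_examined_rows(range_path, /*count_examined_rows=*/true);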