MySQL 8.4.2
Source Code Documentation
Query Executor

Enumerations

enum class Substructure { Substructure::NONE, Substructure::OUTER_JOIN, Substructure::SEMIJOIN, Substructure::WEEDOUT }
 

Functions

static int read_system (TABLE *table)
 Read a constant table when there is at most one matching row, using a table scan. More...
 
static bool alloc_group_fields (JOIN *join, ORDER *group)
 Get a list of buffers for saving last group. More...
 
string RefToString (const Index_lookup &ref, const KEY &key, bool include_nulls)
 
bool has_rollup_result (Item *item)
 Checks if an item has a ROLLUP NULL which needs to be written to temp table. More...
 
bool is_rollup_group_wrapper (const Item *item)
 
Item * unwrap_rollup_group (Item *item)
 
bool prepare_sum_aggregators (Item_sum **sum_funcs, bool need_distinct)
 
bool setup_sum_funcs (THD *thd, Item_sum **func_ptr)
 Call setup() for all sum functions. More...
 
void init_tmptable_sum_functions (Item_sum **func_ptr)
 
void update_tmptable_sum_func (Item_sum **func_ptr, TABLE *tmp_table)
 Update record 0 in tmp_table from record 1. More...
 
bool copy_funcs (Temp_table_param *param, const THD *thd, Copy_func_type type)
 Copy result of functions to record in tmp_table. More...
 
static bool update_const_equal_items (THD *thd, Item *cond, JOIN_TAB *tab)
 Check appearance of new constant items in multiple equalities of a condition after reading a constant table. More...
 
void setup_tmptable_write_func (QEP_TAB *tab, Opt_trace_object *trace)
 Set up write_func of the QEP_tmp_table object. More...
 
static size_t record_prefix_size (const TABLE *table)
 Find out how many bytes it takes to store the smallest prefix which covers all the columns that will be read from a table. More...
 
bool set_record_buffer (TABLE *table, double expected_rows_to_fetch)
 Allocate a data buffer that the storage engine can use for fetching batches of records. More...
 
bool ExtractConditions (Item *condition, Mem_root_array< Item * > *condition_parts)
 Split AND conditions into their constituent parts, recursively. More...
 
static bool ContainsAnyMRRPaths (AccessPath *path)
 See if “path” has any MRR nodes; if so, we cannot optimize them away in PossiblyAttachFilter(), as the BKA iterator expects there to be a corresponding MRR iterator. More...
 
Item * CreateConjunction (List< Item > *items)
 Create an AND conjunction of all given items. More...
 
AccessPath * PossiblyAttachFilter (AccessPath *path, const vector< Item * > &conditions, THD *thd, table_map *conditions_depend_on_outer_tables)
 Return a new iterator that wraps "iterator" and that tests all of the given conditions (if any), ANDed together. More...
 
AccessPath * CreateNestedLoopAccessPath (THD *thd, AccessPath *outer, AccessPath *inner, JoinType join_type, bool pfs_batch_mode)
 
static AccessPath * NewInvalidatorAccessPathForTable (THD *thd, AccessPath *path, QEP_TAB *qep_tab, plan_idx table_index_to_invalidate)
 
static table_map ConvertQepTabMapToTableMap (JOIN *join, qep_tab_map tables)
 
AccessPath * CreateBKAAccessPath (THD *thd, JOIN *join, AccessPath *outer_path, qep_tab_map left_tables, AccessPath *inner_path, qep_tab_map right_tables, TABLE *table, Table_ref *table_list, Index_lookup *ref, JoinType join_type)
 
static AccessPath * PossiblyAttachFilter (AccessPath *path, const vector< PendingCondition > &conditions, THD *thd, table_map *conditions_depend_on_outer_tables)
 
static Item_func_trig_cond * GetTriggerCondOrNull (Item *item)
 
void ConvertItemsToCopy (const mem_root_deque< Item * > &items, Field **fields, Temp_table_param *param)
 For historical reasons, derived table materialization and temporary table materialization didn't specify the fields to materialize in the same way. More...
 
static bool IsJoinCondition (const Item *item, const QEP_TAB *qep_tab)
 
static Item * GetInnermostCondition (Item *item)
 
static bool CheckIfFieldsAvailableForCond (Item *item, table_map build_tables, table_map probe_tables)
 
static void AttachSemiJoinCondition (Item *join_cond, vector< PendingCondition > *join_conditions, QEP_TAB *current_table, qep_tab_map left_tables, plan_idx semi_join_table_idx)
 
void SplitConditions (Item *condition, QEP_TAB *current_table, vector< Item * > *predicates_below_join, vector< PendingCondition > *predicates_above_join, vector< PendingCondition > *join_conditions, plan_idx semi_join_table_idx, qep_tab_map left_tables)
 
static void MarkUnhandledDuplicates (SJ_TMP_TABLE *weedout, plan_idx weedout_start, plan_idx weedout_end, qep_tab_map *unhandled_duplicates)
 For a given duplicate weedout operation, figure out which tables are supposed to be deduplicated by it, and add those to unhandled_duplicates. More...
 
static AccessPath * CreateWeedoutOrLimitAccessPath (THD *thd, AccessPath *path, SJ_TMP_TABLE *weedout_table)
 
static AccessPath * NewWeedoutAccessPathForTables (THD *thd, const qep_tab_map tables_to_deduplicate, QEP_TAB *qep_tabs, uint primary_tables, AccessPath *path)
 
static Substructure FindSubstructure (QEP_TAB *qep_tabs, const plan_idx first_idx, const plan_idx this_idx, const plan_idx last_idx, CallingContext calling_context, bool *add_limit_1, plan_idx *substructure_end, qep_tab_map *unhandled_duplicates)
 Given a range of tables (where we assume that we've already handled first_idx..(this_idx-1) as inner joins), figure out whether this is a semijoin, an outer join or a weedout. More...
 
static bool IsTableScan (AccessPath *path)
 
static AccessPath * GetAccessPathForDerivedTable (THD *thd, QEP_TAB *qep_tab, AccessPath *table_path)
 
static void RecalculateTablePathCost (THD *thd, AccessPath *path, const Query_block &outer_query_block)
 Recalculate the cost of 'path'. More...
 
AccessPath * MoveCompositeIteratorsFromTablePath (THD *thd, AccessPath *path, const Query_block &outer_query_block)
 For a MATERIALIZE access path, move any non-basic iterators (e.g. More...
 
AccessPath * GetAccessPathForDerivedTable (THD *thd, Table_ref *table_ref, TABLE *table, bool rematerialize, Mem_root_array< const AccessPath * > *invalidators, bool need_rowid, AccessPath *table_path)
 
static AccessPath * GetTableAccessPath (THD *thd, QEP_TAB *qep_tab, QEP_TAB *qep_tabs)
 Get the RowIterator used for scanning the given table, with any required materialization operations done first. More...
 
void SetCostOnTableAccessPath (const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)
 
void SetCostOnNestedLoopAccessPath (const Cost_model_server &cost_model, const POSITION *pos_inner, AccessPath *path)
 
void SetCostOnHashJoinAccessPath (const Cost_model_server &cost_model, const POSITION *pos_outer, AccessPath *path)
 
static bool ConditionIsAlwaysTrue (Item *item)
 
static table_map GetPrunedTables (const AccessPath *path)
 Find all the tables below "path" that have been pruned and replaced by a ZERO_ROWS access path. More...
 
static AccessPath * CreateHashJoinAccessPath (THD *thd, QEP_TAB *qep_tab, AccessPath *build_path, qep_tab_map build_tables, AccessPath *probe_path, qep_tab_map probe_tables, JoinType join_type, vector< Item * > *join_conditions, table_map *conditions_depend_on_outer_tables)
 
static void ExtractJoinConditions (const QEP_TAB *current_table, vector< Item * > *predicates, vector< Item * > *join_conditions)
 
static bool UseHashJoin (QEP_TAB *qep_tab)
 
static bool UseBKA (QEP_TAB *qep_tab)
 
static bool QueryMixesOuterBKAAndBNL (JOIN *join)
 
static bool InsideOuterOrAntiJoin (QEP_TAB *qep_tab)
 
void PickOutConditionsForTableIndex (int table_idx, vector< PendingCondition > *from, vector< PendingCondition > *to)
 
void PickOutConditionsForTableIndex (int table_idx, vector< PendingCondition > *from, vector< Item * > *to)
 
AccessPath * FinishPendingOperations (THD *thd, AccessPath *path, QEP_TAB *remove_duplicates_loose_scan_qep_tab, const vector< PendingCondition > &pending_conditions, table_map *conditions_depend_on_outer_tables)
 
AccessPath * ConnectJoins (plan_idx upper_first_idx, plan_idx first_idx, plan_idx last_idx, QEP_TAB *qep_tabs, THD *thd, CallingContext calling_context, vector< PendingCondition > *pending_conditions, vector< PendingInvalidator > *pending_invalidators, vector< PendingCondition > *pending_join_conditions, qep_tab_map *unhandled_duplicates, table_map *conditions_depend_on_outer_tables)
 For a given slice of the table list, build up the iterator tree corresponding to the tables in that slice. More...
 
static table_map get_update_or_delete_target_tables (const JOIN *join)
 
static AccessPath * add_filter_access_path (THD *thd, AccessPath *path, Item *condition, const Query_block *query_block)
 
int do_sj_dups_weedout (THD *thd, SJ_TMP_TABLE *sjtbl)
 SemiJoinDuplicateElimination: Weed out duplicate row combinations. More...
 
int report_handler_error (TABLE *table, int error)
 Helper function for when we get an error from the table handler. More...
 
int join_read_const_table (JOIN_TAB *tab, POSITION *pos)
 Reads content of constant table. More...
 
int read_const (TABLE *table, Index_lookup *ref)
 
static bool cmp_field_value (Field *field, ptrdiff_t diff)
 
static bool group_rec_cmp (ORDER *group, uchar *rec0, uchar *rec1)
 Compare GROUP BY fields from the tmp table's record[0] and record[1]. More...
 
bool table_rec_cmp (TABLE *table)
 Compare GROUP BY fields from the tmp table's record[0] and record[1]. More...
 
ulonglong calc_field_hash (const Field *field, ulonglong *hash_val)
 Generate hash for a field. More...
 
static ulonglong unique_hash_group (ORDER *group)
 Generate hash for unique constraint according to group-by list. More...
 
ulonglong calc_row_hash (TABLE *table)
 Generate hash for unique_constraint for all visible fields of a table. More...
 
bool check_unique_fields (TABLE *table)
 Check whether a row is already present in the tmp table. More...
 
bool construct_lookup (THD *thd, TABLE *table, Index_lookup *ref)
 Copy the lookup key into the table ref's key buffer. More...
 
bool make_group_fields (JOIN *main_join, JOIN *curr_join)
 Allocate group fields or take prepared (cached) ones. More...
 
int update_item_cache_if_changed (List< Cached_item > &list)
 
static size_t compute_ria_idx (const mem_root_deque< Item * > &fields, size_t i, size_t added_non_hidden_fields, size_t border)
 Compute the position mapping from fields to ref_item_array, cf. More...
 
bool copy_fields (Temp_table_param *param, const THD *thd, bool reverse_copy)
 Make a copy of all simple SELECT'ed fields. More...
 
static bool replace_embedded_rollup_references_with_tmp_fields (THD *thd, Item *item, mem_root_deque< Item * > *fields)
 For each rollup wrapper below the given item, replace it with a temporary field, e.g. More...
 
bool change_to_use_tmp_fields (mem_root_deque< Item * > *fields, THD *thd, Ref_item_array ref_item_array, mem_root_deque< Item * > *res_fields, size_t added_non_hidden_fields, bool windowing)
 Change all funcs and sum_funcs to fields in tmp table, and create new list of all items. More...
 
static Item_rollup_group_item * find_rollup_item_in_group_list (Item *item, Query_block *query_block)
 
bool replace_contents_of_rollup_wrappers_with_tmp_fields (THD *thd, Query_block *select, Item *item_arg)
 For each rollup wrapper below the given item, replace its argument with a temporary field, e.g. More...
 
bool change_to_use_tmp_fields_except_sums (mem_root_deque< Item * > *fields, THD *thd, Query_block *select, Ref_item_array ref_item_array, mem_root_deque< Item * > *res_fields, size_t added_non_hidden_fields)
 Change all sum_func refs to fields to point at fields in tmp table. More...
 
bool JOIN::create_intermediate_table (QEP_TAB *tab, const mem_root_deque< Item * > &tmp_table_fields, ORDER_with_src &tmp_table_group, bool save_sum_fields)
 Create a temporary table to be used for processing DISTINCT/ORDER BY/GROUP BY. More...
 
void JOIN::optimize_distinct ()
 Optimize distinct when used on a subset of the tables. More...
 
QEP_TAB::enum_op_type JOIN::get_end_select_func ()
 
void JOIN::create_access_paths ()
 Convert the executor structures to a set of access paths, storing the result in m_root_access_path. More...
 
void JOIN::create_access_paths_for_index_subquery ()
 
bool QEP_TAB::use_order () const
 Use ordering provided by chosen index? More...
 
AccessPath * QEP_TAB::access_path ()
 Construct an access path for reading from this table in the query, using the access method that has been determined previously (e.g., table scan, ref access, optional sort afterwards, etc.). More...
 
bool JOIN::clear_fields (table_map *save_nullinfo)
 Set all column values from all input tables to NULL. More...
 
void JOIN::restore_fields (table_map save_nullinfo)
 Restore all result fields for all tables specified in save_nullinfo. More...
 
bool QEP_TAB::pfs_batch_update (const JOIN *join) const
 

AccessPath * JOIN::attach_access_path_for_update_or_delete (AccessPath *path) const
 
AccessPath * JOIN::create_root_access_path_for_join ()
 
AccessPath * JOIN::attach_access_paths_for_having_and_limit (AccessPath *path) const
 

Variables

static constexpr size_t MAX_RECORD_BUFFER_SIZE = 128 * 1024
 Maximum amount of space (in bytes) to allocate for a Record_buffer. More...
 

Detailed Description

Enumeration Type Documentation

◆ Substructure

enum class Substructure
strong
Enumerator
NONE 
OUTER_JOIN 
SEMIJOIN 
WEEDOUT 

Function Documentation

◆ access_path()

AccessPath * QEP_TAB::access_path ( )

Construct an access path for reading from this table in the query, using the access method that has been determined previously (e.g., table scan, ref access, optional sort afterwards, etc.).

◆ add_filter_access_path()

static AccessPath * add_filter_access_path (THD *thd, AccessPath *path, Item *condition, const Query_block *query_block)
static

◆ alloc_group_fields()

static bool alloc_group_fields (JOIN *join, ORDER *group)
static

Get a list of buffers for saving last group.

Groups are saved in reverse order to simplify the check loop.

◆ attach_access_path_for_update_or_delete()

AccessPath * JOIN::attach_access_path_for_update_or_delete (AccessPath *path) const
private

◆ attach_access_paths_for_having_and_limit()

AccessPath * JOIN::attach_access_paths_for_having_and_limit (AccessPath *path) const
private

◆ AttachSemiJoinCondition()

static void AttachSemiJoinCondition (Item *join_cond, vector< PendingCondition > *join_conditions, QEP_TAB *current_table, qep_tab_map left_tables, plan_idx semi_join_table_idx)
static

◆ calc_field_hash()

ulonglong calc_field_hash (const Field *field, ulonglong *hash_val)

Generate hash for a field.

Returns
generated hash

◆ calc_row_hash()

ulonglong calc_row_hash (TABLE *table)

Generate hash for unique_constraint for all visible fields of a table.

Parameters
table: the table for which we want a hash of its fields
Returns
the hash value
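
As an illustration of the idea, here is a simplified, self-contained model (MockField and hash_row are hypothetical stand-ins, not the server's types; the real code delegates to calc_field_hash per field): the row hash folds together the hashes of all visible fields, with a distinct contribution for SQL NULL.

#include <cstdint>
#include <string>
#include <vector>

struct MockField {
  bool hidden;        // hidden fields are excluded from the row hash
  bool is_null;       // SQL NULL gets its own hash contribution
  std::string bytes;  // packed value bytes
};

uint64_t hash_row(const std::vector<MockField> &fields) {
  uint64_t hash = 0;
  for (const MockField &f : fields) {
    if (f.hidden) continue;
    if (f.is_null) {
      hash = hash * 31 + 0x9e3779b9;  // distinct contribution for NULL
    } else {
      for (unsigned char b : f.bytes) hash = hash * 31 + b;
    }
  }
  return hash;
}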

◆ change_to_use_tmp_fields()

bool change_to_use_tmp_fields (mem_root_deque< Item * > *fields, THD *thd, Ref_item_array ref_item_array, mem_root_deque< Item * > *res_fields, size_t added_non_hidden_fields, bool windowing)

Change all funcs and sum_funcs to fields in tmp table, and create new list of all items.

Parameters
fields: list of all fields; should really be const, but Item does not always respect constness
thd: THD pointer
[out] ref_item_array: array of pointers to top elements of the field list
[out] res_fields: new list of all items
added_non_hidden_fields: number of visible fields added by the subquery-to-derived transformation
windowing: true if creating a tmp table for windowing materialization
Returns
false if success, true if error

◆ change_to_use_tmp_fields_except_sums()

bool change_to_use_tmp_fields_except_sums (mem_root_deque< Item * > *fields, THD *thd, Query_block *select, Ref_item_array ref_item_array, mem_root_deque< Item * > *res_fields, size_t added_non_hidden_fields)

Change all sum_func refs to fields to point at fields in tmp table.

Change all funcs to be fields in tmp table.

This is used when we set up a temporary table, but aggregate functions (sum_funcs) cannot be evaluated yet, for instance because data is not sorted in the right order. (Otherwise, change_to_use_tmp_fields() would be used.)

Parameters
fields: list of all fields; should really be const, but Item does not always respect constness
select: the query block we are doing this to
thd: THD pointer
[out] ref_item_array: array of pointers to top elements of the field list
[out] res_fields: new list of items of the select item list
added_non_hidden_fields: number of visible fields added by the subquery-to-derived transformation
Returns
false if success, true if error

◆ check_unique_fields()

bool check_unique_fields (TABLE *table)

Check whether a row is already present in the tmp table.

Calculates record's hash and checks whether the record given in table->record[0] is already present in the tmp table.

Parameters
table: JOIN_TAB of tmp table to check
Note
This function assumes record[0] is already filled by the caller. Depending on the presence of table->group, either the group fields or the full list of the table's fields are used to calculate the hash.
Returns
false: same record was found; true: record wasn't found
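
A minimal sketch of the same hash-then-compare pattern, assuming a toy DedupTable in place of the real temporary table with its unique hash index (std::unordered_set hashes first and falls back to an equality compare on collision, as described above); note the real function's inverted return sense.

#include <string>
#include <unordered_set>

struct DedupTable {
  // Stand-in for the tmp table's unique hash index.
  std::unordered_set<std::string> seen;

  // true = record is new, false = an equal record was already present.
  // (check_unique_fields itself returns the opposite: false = found.)
  bool row_is_new(const std::string &record) {
    return seen.insert(record).second;
  }
};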

◆ CheckIfFieldsAvailableForCond()

static bool CheckIfFieldsAvailableForCond (Item *item, table_map build_tables, table_map probe_tables)
static

◆ clear_fields()

bool JOIN::clear_fields (table_map *save_nullinfo)

Set all column values from all input tables to NULL.

This is used when no rows are found during grouping: for FROM clause, a result row of all NULL values will be output; then SELECT list expressions get evaluated. E.g. SUM() will be NULL (the special "clear" value) and thus SUM() IS NULL will be true.

Note
Setting field values for input tables is a destructive operation, since it overwrites the NULL value flags with 1 bits. Rows from const tables are never re-read, hence their NULL value flags must be saved by this function and later restored by JOIN::restore_fields(). This is generally not necessary for non-const tables, since field values are overwritten when new rows are read.
Parameters
[out] save_nullinfo: Map of tables whose fields were set to NULL, and for which NULL values must be restored. Should be set to all zeroes on entry to the function.
Returns
false if success, true if error
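
A simplified model of the save/restore contract described in the note (MockTable, clear_fields_model and saved_flags are hypothetical; the server stores the saved flags inside TABLE itself):

#include <cstddef>
#include <cstdint>
#include <vector>

using table_map = uint64_t;

struct MockTable {
  bool const_table;    // const tables are never re-read
  uint8_t null_flags;  // packed NULL bits of the row buffer
};

// NULL flags of const tables are saved (and the table recorded in
// *save_nullinfo) before every flag is overwritten with 1-bits.
void clear_fields_model(std::vector<MockTable> &tables,
                        table_map *save_nullinfo,
                        std::vector<uint8_t> *saved_flags) {
  for (size_t i = 0; i < tables.size(); ++i) {
    if (tables[i].const_table) {
      *save_nullinfo |= table_map{1} << i;  // must be restored later
      (*saved_flags)[i] = tables[i].null_flags;
    }
    tables[i].null_flags = 0xFF;  // mark every column NULL
  }
}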

◆ cmp_field_value()

static bool cmp_field_value (Field *field, ptrdiff_t diff)
static

◆ compute_ria_idx()

static size_t compute_ria_idx (const mem_root_deque< Item * > &fields, size_t i, size_t added_non_hidden_fields, size_t border)
static

Compute the position mapping from fields to ref_item_array; cf. the detailed explanation in change_to_use_tmp_fields_except_sums.

◆ ConditionIsAlwaysTrue()

static bool ConditionIsAlwaysTrue (Item *item)
static

◆ ConnectJoins()

AccessPath * ConnectJoins (plan_idx upper_first_idx, plan_idx first_idx, plan_idx last_idx, QEP_TAB *qep_tabs, THD *thd, CallingContext calling_context, vector< PendingCondition > *pending_conditions, vector< PendingInvalidator > *pending_invalidators, vector< PendingCondition > *pending_join_conditions, qep_tab_map *unhandled_duplicates, table_map *conditions_depend_on_outer_tables)

For a given slice of the table list, build up the iterator tree corresponding to the tables in that slice.

It handles inner and outer joins, as well as semijoins (“first match”).

The join tree in MySQL is generally a left-deep tree of inner joins, so we can start at the left, make an inner join against the next table, join the result of that against the next table, etc. However, a given sub-slice of the table list can be designated as an outer join, by setting first_inner() and last_inner() on the first table of said slice. (It is also set in some, but not all, of the other tables in the slice.) If so, we call ourselves recursively with that slice, put it as the right (inner) arm of an outer join, and then continue with our inner join.

Similarly, if a table N has set “first match” to table M (ie., jump back to table M whenever we see a non-filtered record in table N), then there is a subslice from [M+1,N] that we need to process recursively before putting it as the right side of a semijoin. Every semijoin can be implemented with a LIMIT 1, but for clarity and performance, we prefer to use a NestedLoopJoin with a special SEMI join type whenever possible. Sometimes, we have no choice, though (see the comments below). Note that we cannot use first_sj_inner() for detecting semijoins, as it is not updated when tables are reordered by the join optimizer. Outer joins and semijoins can nest, so we need to take some care to make sure that we pick the outermost structure to recurse on.

Conditions are a bit tricky. Conceptually, SQL evaluates conditions only after all tables have been joined; however, for efficiency reasons, we want to evaluate them as early as possible. As long as we are only dealing with inner joins, this is as soon as we've read all tables participating in the condition, but for outer joins, we need to wait until the join has happened. See pending_conditions below.

Parameters
upper_first_idx: gives us the first table index of the other side of the join. Only valid if we are inside a substructure (outer join, semijoin or antijoin). I.e., if we are processing the right side of the query 't1 LEFT JOIN t2', upper_first_idx gives us the table index of 't1'. Used by hash join to determine the table map for each side of the join.
first_idx: index of the first table in the slice we are creating a tree for (inclusive)
last_idx: index of the last table in the slice we are creating a tree for (exclusive)
qep_tabs: the full list of tables we are joining
thd: the THD to allocate the iterators on
calling_context: what situation we have immediately around us in the tree (i.e., whether we are called to resolve the inner part of an outer join, a semijoin, etc.); mostly used to avoid infinite recursion where we would process e.g. the same semijoin over and over again
pending_conditions: if nullptr, we are not at the right (inner) side of any outer join and can evaluate conditions immediately. If not, we need to push any WHERE predicates to that vector and evaluate them only after joins.
pending_invalidators: a global list of CacheInvalidatorIterators we need to emit, but cannot yet due to pending outer joins. Note that unlike pending_conditions and pending_join_conditions, this is never nullptr, and is always the same pointer when recursing within the same JOIN.
pending_join_conditions: if not nullptr, we are at the inner side of a semijoin/antijoin. The join iterator is created at the outer side, so any join conditions at the inner side need to be pushed to this vector so that they can be attached to the join iterator. Note that this is currently only used by hash join.
[out] unhandled_duplicates: list of tables we should have deduplicated using duplicate weedout, but could not; append-only.
[out] conditions_depend_on_outer_tables: For each condition we have applied on the inside of these iterators, their dependent tables are appended to this set. Thus, if conditions_depend_on_outer_tables contains something from outside the tables covered by [first_idx,last_idx) (i.e., after translation from QEP_TAB indexes to table indexes), we cannot use a hash join, since the returned iterator depends on seeing outer rows when evaluating its conditions.
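
The left-deep recursion described above can be sketched with toy stand-ins (Path, find_substructure, make_table_path and make_join are hypothetical, not the real AccessPath machinery; pending conditions, invalidators and duplicate handling are omitted):

enum class Substructure { NONE, OUTER_JOIN, SEMIJOIN, WEEDOUT };

struct Path {
  Path *outer = nullptr;  // left (outer) child of a join node
  Path *inner = nullptr;  // right (inner) child of a join node
};

// Stub: the real FindSubstructure inspects first_inner()/"first match"
// markers; here every table is treated as a plain inner join.
Substructure find_substructure(int idx, int *substructure_end) {
  *substructure_end = idx + 1;
  return Substructure::NONE;
}

Path *make_table_path(int /*idx*/) { return new Path; }  // stub table access

Path *make_join(Path *outer, Path *inner, Substructure /*type*/) {
  Path *join = new Path;
  join->outer = outer;
  join->inner = inner;
  return join;
}

Path *connect_joins(int first_idx, int last_idx) {
  Path *root = nullptr;
  for (int i = first_idx; i < last_idx;) {
    int end;  // exclusive end of the substructure starting at i
    Substructure sub = find_substructure(i, &end);
    // A plain table joins in directly; an outer join/semijoin/weedout
    // slice is built recursively and becomes the inner (right) arm.
    Path *right = (sub == Substructure::NONE) ? make_table_path(i)
                                              : connect_joins(i, end);
    root = (root == nullptr) ? right : make_join(root, right, sub);
    i = end;
  }
  return root;
}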

◆ construct_lookup()

bool construct_lookup (THD *thd, TABLE *table, Index_lookup *ref)

Copy the lookup key into the table ref's key buffer.

Parameters
thd: pointer to the THD object
table: the table to read
ref: information about the index lookup key
Return values
false: ref key copied successfully
true: error detected during copying of key

◆ ContainsAnyMRRPaths()

static bool ContainsAnyMRRPaths (AccessPath *path)
static

See if “path” has any MRR nodes; if so, we cannot optimize them away in PossiblyAttachFilter(), as the BKA iterator expects there to be a corresponding MRR iterator.

(This is a very rare case, so all we care about is that it should not crash.)

◆ ConvertItemsToCopy()

void ConvertItemsToCopy (const mem_root_deque< Item * > &items, Field **fields, Temp_table_param *param)

For historical reasons, derived table materialization and temporary table materialization didn't specify the fields to materialize in the same way.

Temporary table materialization used copy_funcs() to get the data into the Field pointers of the temporary table to be written, storing the lists in items_to_copy. (Originally, there was also copy_fields(), but it is no longer used for this purpose.)

However, derived table materialization used JOIN::fields (which is a set of Item, not Field!) for the same purpose, calling fill_record() (which originally was meant for INSERT and UPDATE) instead. Thus, we have to rewrite one to the other, so that we can have only one MaterializeIterator. We choose to rewrite JOIN::fields to items_to_copy.

TODO: The optimizer should output just one kind of structure directly.
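Conceptually, the rewrite pairs each output expression with the temporary-table column it must be written to; a minimal model (MockItem, MockField and convert_items_to_copy are illustrative only, and the two lists are assumed parallel):

#include <cstddef>
#include <utility>
#include <vector>

struct MockItem {};   // stand-in for Item (an expression)
struct MockField {};  // stand-in for Field (a tmp table column)

using CopyList = std::vector<std::pair<MockItem *, MockField *>>;

// Assumes the lists are parallel: expression i is written to column i.
CopyList convert_items_to_copy(const std::vector<MockItem *> &items,
                               const std::vector<MockField *> &fields) {
  CopyList copies;
  for (size_t i = 0; i < items.size(); ++i)
    copies.emplace_back(items[i], fields[i]);
  return copies;
}
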

◆ ConvertQepTabMapToTableMap()

static table_map ConvertQepTabMapToTableMap (JOIN *join, qep_tab_map tables)
static

◆ copy_fields()

bool copy_fields (Temp_table_param *param, const THD *thd, bool reverse_copy)

Make a copy of all simple SELECT'ed fields.

This is used in window functions, to copy fields to and from the frame buffer. (It used to be used in materialization, but now that is entirely done by copy_funcs(), even for Item_field.)

Parameters
param: Represents the current temporary file being produced
thd: The current thread
reverse_copy: If true, copies fields back from the frame buffer tmp table to the output table's buffer, cf. bring_back_frame_row.
Returns
false if OK, true on error.

◆ copy_funcs()

bool copy_funcs (Temp_table_param *param, const THD *thd, Copy_func_type type)

Copy result of functions to record in tmp_table.

Uses the thread pointer to check for errors in some of the val_xxx() methods called by the save_in_result_field() function. TODO: make the Item::val_xxx() methods return an error code.

Parameters
param: Copy functions of tmp table specified by param
thd: pointer to the current thread for error checking
type: type of function Items that need to be copied (used w.r.t. windowing functions).
Return values
false: if OK
true: on error

◆ create_access_paths()

void JOIN::create_access_paths ( )
private

Convert the executor structures to a set of access paths, storing the result in m_root_access_path.

◆ create_access_paths_for_index_subquery()

void JOIN::create_access_paths_for_index_subquery ( )
private

◆ create_intermediate_table()

bool JOIN::create_intermediate_table (QEP_TAB *tab, const mem_root_deque< Item * > &tmp_table_fields, ORDER_with_src &tmp_table_group, bool save_sum_fields)
private

Create a temporary table to be used for processing DISTINCT/ORDER BY/GROUP BY.

Note
Will modify JOIN object wrt sort/group attributes
Parameters
tab: the JOIN_TAB object to attach the created table to
tmp_table_fields: List of items that will be used to define column types of the table.
tmp_table_group: Group key to use for temporary table, empty if none.
save_sum_fields: If true, do not replace Item_sum items in the tmp_fields list with Item_field items referring to fields in the temporary table.
Returns
false on success, true on failure

If this is a window's OUT table, any final DISTINCT or ORDER BY will lead to the window showing use of a tmp table in the final windowing step, so there is no need to signal use of a tmp table unless we are here for another tmp table.

◆ create_root_access_path_for_join()

AccessPath * JOIN::create_root_access_path_for_join ( )
private

Helpers for create_access_paths.

◆ CreateBKAAccessPath()

AccessPath * CreateBKAAccessPath (THD *thd, JOIN *join, AccessPath *outer_path, qep_tab_map left_tables, AccessPath *inner_path, qep_tab_map right_tables, TABLE *table, Table_ref *table_list, Index_lookup *ref, JoinType join_type)

◆ CreateConjunction()

Item * CreateConjunction ( List< Item > *  items)

Create an AND conjunction of all given items.

If there are no items, returns nullptr. If there's only one item, returns that item.

◆ CreateHashJoinAccessPath()

static AccessPath * CreateHashJoinAccessPath (THD *thd, QEP_TAB *qep_tab, AccessPath *build_path, qep_tab_map build_tables, AccessPath *probe_path, qep_tab_map probe_tables, JoinType join_type, vector< Item * > *join_conditions, table_map *conditions_depend_on_outer_tables)
static

◆ CreateNestedLoopAccessPath()

AccessPath * CreateNestedLoopAccessPath (THD *thd, AccessPath *outer, AccessPath *inner, JoinType join_type, bool pfs_batch_mode)

◆ CreateWeedoutOrLimitAccessPath()

static AccessPath * CreateWeedoutOrLimitAccessPath (THD *thd, AccessPath *path, SJ_TMP_TABLE *weedout_table)
static

◆ do_sj_dups_weedout()

int do_sj_dups_weedout (THD *thd, SJ_TMP_TABLE *sjtbl)

SemiJoinDuplicateElimination: Weed out duplicate row combinations.

SYNOPSIS
  do_sj_dups_weedout()
    thd    Thread handle
    sjtbl  Duplicate weedout table

DESCRIPTION Try storing current record combination of outer tables (i.e. their rowids) in the temporary table. This records the fact that we've seen this record combination and also tells us if we've seen it before.

RETURN
  -1  Error
   1  The row combination is a duplicate (discard it)
   0  The row combination is not a duplicate (continue)
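
A self-contained model of this contract (WeedoutTable and weed_out are illustrative; the real code concatenates the outer tables' rowids and inserts them into a temp table with a unique key):

#include <set>
#include <string>

struct WeedoutTable {
  std::set<std::string> rowid_combinations;  // stand-in for the tmp table
};

// -1 = error, 1 = duplicate (discard row), 0 = first occurrence (keep row).
int weed_out(WeedoutTable *tbl, const std::string &rowid_key) {
  if (tbl == nullptr) return -1;  // error path
  bool inserted = tbl->rowid_combinations.insert(rowid_key).second;
  return inserted ? 0 : 1;
}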

◆ ExtractConditions()

bool ExtractConditions (Item *condition, Mem_root_array< Item * > *condition_parts)

Split AND conditions into their constituent parts, recursively.

Conditions that are not AND conditions are appended unchanged onto condition_parts. E.g. if you have ((a AND b) AND c), condition_parts will contain [a, b, c], plus whatever it contained before the call.

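The recursion is straightforward; a sketch over a toy condition tree (Cond and extract_conditions are stand-ins for Item and the real function):

#include <vector>

struct Cond {
  bool is_and = false;
  std::vector<Cond *> args;  // only meaningful when is_and is true
};

void extract_conditions(Cond *condition, std::vector<Cond *> *parts) {
  if (condition->is_and) {
    for (Cond *arg : condition->args) extract_conditions(arg, parts);
  } else {
    parts->push_back(condition);  // leaf condition, appended unchanged
  }
}
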
◆ ExtractJoinConditions()

static void ExtractJoinConditions (const QEP_TAB *current_table, vector< Item * > *predicates, vector< Item * > *join_conditions)
static

◆ find_rollup_item_in_group_list()

static Item_rollup_group_item * find_rollup_item_in_group_list (Item *item, Query_block *query_block)
static

◆ FindSubstructure()

static Substructure FindSubstructure (QEP_TAB *qep_tabs, const plan_idx first_idx, const plan_idx this_idx, const plan_idx last_idx, CallingContext calling_context, bool *add_limit_1, plan_idx *substructure_end, qep_tab_map *unhandled_duplicates)
static

Given a range of tables (where we assume that we've already handled first_idx..(this_idx-1) as inner joins), figure out whether this is a semijoin, an outer join or a weedout.

In general, the outermost structure wins; if we are in one of the rare cases where there are e.g. coincident (first match) semijoins and weedouts, we do various forms of conflict resolution:

  • Unhandled weedouts will add elements to unhandled_duplicates (to be handled at the top level of the query).
  • Unhandled semijoins will either set add_limit_1 to true, which means a LIMIT 1 iterator should be added, or add elements to unhandled_duplicates in situations that cannot be solved by a simple one-table, one-row LIMIT.

If not returning NONE, substructure_end will also be filled with where this sub-join ends (exclusive).

◆ FinishPendingOperations()

AccessPath * FinishPendingOperations (THD *thd, AccessPath *path, QEP_TAB *remove_duplicates_loose_scan_qep_tab, const vector< PendingCondition > &pending_conditions, table_map *conditions_depend_on_outer_tables)

◆ get_end_select_func()

QEP_TAB::enum_op_type JOIN::get_end_select_func ( )

Rows produced by a join sweep may end up in a temporary table or be sent to a client. Set up the function of the nested loop join algorithm which handles final fully constructed and matched records.

Returns
end_query_block function to use. This function can't fail.

◆ get_update_or_delete_target_tables()

static table_map get_update_or_delete_target_tables (const JOIN *join)
static

◆ GetAccessPathForDerivedTable() [1/2]

static AccessPath * GetAccessPathForDerivedTable (THD *thd, QEP_TAB *qep_tab, AccessPath *table_path)
static

◆ GetAccessPathForDerivedTable() [2/2]

AccessPath * GetAccessPathForDerivedTable (THD *thd, Table_ref *table_ref, TABLE *table, bool rematerialize, Mem_root_array< const AccessPath * > *invalidators, bool need_rowid, AccessPath *table_path)

◆ GetInnermostCondition()

static Item * GetInnermostCondition (Item *item)
static
Returns
the innermost condition of a nested trigger condition. If the item is not a trigger condition, the item itself is returned.

◆ GetPrunedTables()

static table_map GetPrunedTables (const AccessPath *path)
static

Find all the tables below "path" that have been pruned and replaced by a ZERO_ROWS access path.

◆ GetTableAccessPath()

static AccessPath * GetTableAccessPath (THD *thd, QEP_TAB *qep_tab, QEP_TAB *qep_tabs)
static

Get the RowIterator used for scanning the given table, with any required materialization operations done first.

◆ GetTriggerCondOrNull()

static Item_func_trig_cond * GetTriggerCondOrNull (Item *item)
static

◆ group_rec_cmp()

static bool group_rec_cmp (ORDER *group, uchar *rec0, uchar *rec1)
static

Compare GROUP BY fields from the tmp table's record[0] and record[1].

Returns
true: records are different; false: records are the same
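
A simplified byte-level model (RecordPair, offsets/lengths and group_fields_differ are illustrative; the real code compares through Field objects at a fixed record[0]/record[1] byte distance):

#include <cstddef>
#include <cstring>

struct RecordPair {
  unsigned char rec0[64];  // record[0] image
  unsigned char rec1[64];  // record[1] image
};

// true = records differ, false = records are equal (the same inverted
// sense as group_rec_cmp's documented return value).
bool group_fields_differ(const RecordPair &r, const size_t *offsets,
                         const size_t *lengths, size_t num_fields) {
  for (size_t i = 0; i < num_fields; ++i) {
    if (std::memcmp(r.rec0 + offsets[i], r.rec1 + offsets[i],
                    lengths[i]) != 0)
      return true;  // first mismatching field decides
  }
  return false;
}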

◆ has_rollup_result()

bool has_rollup_result (Item *item)

Checks if an item has a ROLLUP NULL which needs to be written to temp table.

Parameters
item: Item for which we need to detect if ROLLUP NULL has to be written.
Returns
false if ROLLUP NULL need not be written for this item. true if it has to be written.

◆ init_tmptable_sum_functions()

void init_tmptable_sum_functions ( Item_sum **  func_ptr)

◆ InsideOuterOrAntiJoin()

static bool InsideOuterOrAntiJoin (QEP_TAB *qep_tab)
static

◆ is_rollup_group_wrapper()

bool is_rollup_group_wrapper (const Item *item)

◆ IsJoinCondition()

static bool IsJoinCondition (const Item *item, const QEP_TAB *qep_tab)
static
Parameters
item: The item we want to check for being a join condition.
qep_tab: The table we are joining in.
Returns
true if 'item' is a join condition for a join involving the given table (both equi-join and non-equi-join condition).

◆ IsTableScan()

static bool IsTableScan (AccessPath *path)
static

◆ join_read_const_table()

int join_read_const_table (JOIN_TAB *tab, POSITION *pos)

Reads content of constant table.

Parameters
tab: table
pos: position of table in query plan
Return values
0: ok, one row was found or one NULL-complemented row was created
-1: ok, no row was found and no NULL-complemented row was created
1: error

◆ make_group_fields()

bool make_group_fields (JOIN *main_join, JOIN *curr_join)

Allocate group fields or take prepared (cached) ones.

Parameters
main_join: join of the current select
curr_join: current join (join of the current select, or a temporary copy of it)
Return values
0: ok
1: failed

◆ MarkUnhandledDuplicates()

static void MarkUnhandledDuplicates (SJ_TMP_TABLE *weedout, plan_idx weedout_start, plan_idx weedout_end, qep_tab_map *unhandled_duplicates)
static

For a given duplicate weedout operation, figure out which tables are supposed to be deduplicated by it, and add those to unhandled_duplicates.

(SJ_TMP_TABLE contains the deduplication key, which is exactly the complement of the tables to be deduplicated.)

◆ MoveCompositeIteratorsFromTablePath()

AccessPath * MoveCompositeIteratorsFromTablePath (THD *thd, AccessPath *path, const Query_block &query_block)

For a MATERIALIZE access path, move any non-basic iterators (e.g.

sorts and filters) from table_path to above the path, for easier EXPLAIN and generally simpler structure. Note the assert in CreateIteratorFromAccessPath() that we succeeded. (ALTERNATIVE counts as a basic iterator in this regard.)

We do this by finding the second-bottommost access path, and inserting our materialize node as its child. The bottommost one becomes the actual table access path.

If a ZERO_ROWS access path is materialized, we simply replace the MATERIALIZE path with the ZERO_ROWS path, since there is nothing to materialize.

Parameters
thd: The current thread.
path: the MATERIALIZE path.
query_block: The query block in which 'path' belongs.
Returns
The new root of the set of AccessPaths formed by 'path' and its descendants.

◆ NewInvalidatorAccessPathForTable()

static AccessPath * NewInvalidatorAccessPathForTable (THD *thd, AccessPath *path, QEP_TAB *qep_tab, plan_idx table_index_to_invalidate)
static

◆ NewWeedoutAccessPathForTables()

static AccessPath * NewWeedoutAccessPathForTables (THD *thd, const qep_tab_map tables_to_deduplicate, QEP_TAB *qep_tabs, uint primary_tables, AccessPath *path)
static

◆ optimize_distinct()

void JOIN::optimize_distinct ( )
private

Optimize distinct when used on a subset of the tables.

E.g.: SELECT DISTINCT t1.a FROM t1, t2 WHERE t1.b = t2.b. In this case we can stop scanning t2 when we have found one t1.a.

◆ pfs_batch_update()

bool QEP_TAB::pfs_batch_update (const JOIN *join) const

◆ PickOutConditionsForTableIndex() [1/2]

void PickOutConditionsForTableIndex (int table_idx, vector< PendingCondition > *from, vector< Item * > *to)

◆ PickOutConditionsForTableIndex() [2/2]

void PickOutConditionsForTableIndex (int table_idx, vector< PendingCondition > *from, vector< PendingCondition > *to)

◆ PossiblyAttachFilter() [1/2]

AccessPath * PossiblyAttachFilter (AccessPath *path, const vector< Item * > &conditions, THD *thd, table_map *conditions_depend_on_outer_tables)

Return a new iterator that wraps "iterator" and that tests all of the given conditions (if any), ANDed together.

If there are no conditions, just return the given iterator back.
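
A minimal sketch of the wrap-or-pass-through logic, with toy Path/Cond types in place of AccessPath and Item:

#include <vector>

struct Cond {};
struct Path {
  Path *child = nullptr;
  std::vector<Cond *> filter_conds;  // non-empty marks a filter node
};

Path *possibly_attach_filter(Path *path, const std::vector<Cond *> &conds) {
  if (conds.empty()) return path;  // nothing to test: same path back
  Path *filter = new Path;         // wrap the path in a filter node
  filter->child = path;
  filter->filter_conds = conds;    // conceptually ANDed together
  return filter;
}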

◆ PossiblyAttachFilter() [2/2]

static AccessPath * PossiblyAttachFilter (AccessPath *path, const vector< PendingCondition > &conditions, THD *thd, table_map *conditions_depend_on_outer_tables)
static

◆ prepare_sum_aggregators()

bool prepare_sum_aggregators (Item_sum **sum_funcs, bool need_distinct)

◆ QueryMixesOuterBKAAndBNL()

static bool QueryMixesOuterBKAAndBNL (JOIN *join)
static

◆ read_const()

int read_const (TABLE *table, Index_lookup *ref)

◆ read_system()

static int read_system (TABLE *table)
static

Read a constant table when there is at most one matching row, using a table scan.

Parameters
table: Table to read
Return values
0: Row was found
-1: Row was not found
1: Got an error (other than row not found) during read

◆ RecalculateTablePathCost()

static void RecalculateTablePathCost (THD *thd, AccessPath *path, const Query_block &outer_query_block)
static

Recalculate the cost of 'path'.

Parameters
thd: Current thread.
path: the access path for which we update the cost numbers.
outer_query_block: the query block to which 'path' belongs.

◆ record_prefix_size()

static size_t record_prefix_size (const TABLE *table)
static

Find out how many bytes it takes to store the smallest prefix which covers all the columns that will be read from a table.

Parameters
table: the table to read
Returns
the size of the smallest prefix that covers all records to be read from the table
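
The computation amounts to taking the maximum end offset over the columns in the read set; a simplified model (Column and record_prefix are hypothetical stand-ins; the real code works from the TABLE's read set and field layout):

#include <algorithm>
#include <cstddef>
#include <vector>

struct Column {
  size_t offset;  // position within the record buffer
  size_t length;  // packed storage size
  bool read;      // whether the column is in the read set
};

size_t record_prefix(const std::vector<Column> &cols) {
  size_t prefix = 0;
  for (const Column &c : cols)
    if (c.read) prefix = std::max(prefix, c.offset + c.length);
  return prefix;
}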

◆ RefToString()

string RefToString (const Index_lookup &ref, const KEY &key, bool include_nulls)

◆ replace_contents_of_rollup_wrappers_with_tmp_fields()

bool replace_contents_of_rollup_wrappers_with_tmp_fields (THD *thd, Query_block *select, Item *item_arg)

For each rollup wrapper below the given item, replace its argument with a temporary field, e.g.

1 + rollup_group_item(a) -> 1 + rollup_group_item(<temporary>.a).

Which temporary field to use is found by looking at the Query_block's group items, and looking up their (previously set) result fields.

◆ replace_embedded_rollup_references_with_tmp_fields()

static bool replace_embedded_rollup_references_with_tmp_fields (THD *thd, Item *item, mem_root_deque< Item * > *fields)
static

For each rollup wrapper below the given item, replace it with a temporary field, e.g.

1 + rollup_group_item(a) -> 1 + <temporary>.rollup_group_item(a)

Which temporary field to use is found by looking at the other fields; the rollup_group_item should already exist earlier in the list (and having a temporary table field set up), simply by virtue of being a group item.

◆ report_handler_error()

int report_handler_error (TABLE *table, int error)

Helper function for when we get an error from the table handler.

◆ restore_fields()

void JOIN::restore_fields ( table_map  save_nullinfo)

Restore all result fields for all tables specified in save_nullinfo.

Parameters
save_nullinfo: Set of tables for which restore is necessary.
Note
Const tables must have their NULL value flags restored.
See also
JOIN::clear_fields().

◆ set_record_buffer()

bool set_record_buffer (TABLE *table, double expected_rows_to_fetch)

Allocate a data buffer that the storage engine can use for fetching batches of records.

A buffer is only allocated if ha_is_record_buffer_wanted() returns true for the handler, and the scan in question is of a kind that could be expected to benefit from fetching records in batches.

Parameters
table: the table to read
expected_rows_to_fetch: number of rows the optimizer thinks we will be reading out of the table
Return values
true: if an error occurred when allocating the buffer
false: if a buffer was successfully allocated, or if no buffer allocation was attempted
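
A sketch of one plausible sizing rule under the documented cap (record_buffer_bytes is hypothetical; the server's actual heuristics differ and also decide whether to allocate at all):

#include <algorithm>
#include <cstddef>

constexpr size_t MAX_RECORD_BUFFER_SIZE = 128 * 1024;

size_t record_buffer_bytes(double expected_rows, size_t row_size) {
  if (expected_rows < 1.0) expected_rows = 1.0;  // room for at least one row
  double wanted = expected_rows * static_cast<double>(row_size);
  return std::min(static_cast<size_t>(wanted), MAX_RECORD_BUFFER_SIZE);
}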

◆ SetCostOnHashJoinAccessPath()

void SetCostOnHashJoinAccessPath (const Cost_model_server &cost_model, const POSITION *pos_outer, AccessPath *path)

◆ SetCostOnNestedLoopAccessPath()

void SetCostOnNestedLoopAccessPath (const Cost_model_server &cost_model, const POSITION *pos_inner, AccessPath *path)

◆ SetCostOnTableAccessPath()

void SetCostOnTableAccessPath (const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)

◆ setup_sum_funcs()

bool setup_sum_funcs (THD *thd, Item_sum **func_ptr)

Call setup() for all sum functions.

Parameters
thd: thread handler
func_ptr: sum function list
Return values
false: ok
true: error

◆ setup_tmptable_write_func()

void setup_tmptable_write_func (QEP_TAB *tab, Opt_trace_object *trace)

Set up write_func of the QEP_tmp_table object.

Parameters
tab: QEP_TAB of a tmp table
trace: Opt_trace_object to add to

The function sets up write_func according to how the QEP_tmp_table object attached to the given join_tab will be used in the query.

◆ SplitConditions()

void SplitConditions (Item *condition, QEP_TAB *current_table, vector< Item * > *predicates_below_join, vector< PendingCondition > *predicates_above_join, vector< PendingCondition > *join_conditions, plan_idx semi_join_table_idx, qep_tab_map left_tables)

◆ table_rec_cmp()

bool table_rec_cmp (TABLE *table)

Compare GROUP BY fields from the tmp table's record[0] and record[1].

Returns
true: records are different; false: records are the same

◆ unique_hash_group()

static ulonglong unique_hash_group (ORDER *group)
static

Generate hash for unique constraint according to group-by list.

This reads the values of the GROUP BY expressions from fields so assumes those expressions have been computed and stored into fields of a temporary table; in practice this means that copy_funcs() must have been called.

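Schematically, the group hash folds per-field hashes of the GROUP BY columns, reading values already stored in the tmp table's fields (field_hash and group_hash below are illustrative stand-ins for calc_field_hash, not the server's functions):

#include <cstdint>
#include <vector>

// FNV-style fold, purely for illustration.
uint64_t field_hash(uint64_t value, uint64_t acc) {
  return (acc ^ value) * 1099511628211ULL;
}

uint64_t group_hash(const std::vector<uint64_t> &group_field_values) {
  uint64_t hash = 14695981039346656037ULL;
  for (uint64_t v : group_field_values) hash = field_hash(v, hash);
  return hash;
}
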
◆ unwrap_rollup_group()

Item * unwrap_rollup_group (Item *item)

◆ update_const_equal_items()

static bool update_const_equal_items (THD *thd, Item *cond, JOIN_TAB *tab)
static

Check appearance of new constant items in multiple equalities of a condition after reading a constant table.

The function retrieves the cond condition and, for each encountered multiple equality, checks whether new constants have appeared after reading the constant (single row) table tab. If so, it adjusts the multiple equality appropriately.

Parameters
thd: thread handler
cond: condition whose multiple equalities are to be checked
tab: constant table that has been read

◆ update_item_cache_if_changed()

int update_item_cache_if_changed ( List< Cached_item > &  list)

◆ update_tmptable_sum_func()

void update_tmptable_sum_func (Item_sum **func_ptr, TABLE *tmp_table)

Update record 0 in tmp_table from record 1.

◆ use_order()

bool QEP_TAB::use_order ( ) const

Use ordering provided by chosen index?

Check if access to this JOIN_TAB has to retrieve rows in sorted order as defined by the ordered index used to access this table.

◆ UseBKA()

static bool UseBKA ( QEP_TAB qep_tab)
static

◆ UseHashJoin()

static bool UseHashJoin ( QEP_TAB qep_tab)
static

Variable Documentation

◆ MAX_RECORD_BUFFER_SIZE

static constexpr size_t MAX_RECORD_BUFFER_SIZE = 128 * 1024

Maximum amount of space (in bytes) to allocate for a Record_buffer.