MySQL 9.1.0
Source Code Documentation
AccessPath Struct Reference

Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path contains pretty much exactly the information needed to instantiate the given iterator, plus some information that is only needed during planning, such as costs. More...

#include <access_path.h>

Public Types

enum  Type : uint8_t {
  TABLE_SCAN , SAMPLE_SCAN , INDEX_SCAN , INDEX_DISTANCE_SCAN ,
  REF , REF_OR_NULL , EQ_REF , PUSHED_JOIN_REF ,
  FULL_TEXT_SEARCH , CONST_TABLE , MRR , FOLLOW_TAIL ,
  INDEX_RANGE_SCAN , INDEX_MERGE , ROWID_INTERSECTION , ROWID_UNION ,
  INDEX_SKIP_SCAN , GROUP_INDEX_SKIP_SCAN , DYNAMIC_INDEX_RANGE_SCAN , TABLE_VALUE_CONSTRUCTOR ,
  FAKE_SINGLE_ROW , ZERO_ROWS , ZERO_ROWS_AGGREGATED , MATERIALIZED_TABLE_FUNCTION ,
  UNQUALIFIED_COUNT , NESTED_LOOP_JOIN , NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL , BKA_JOIN ,
  HASH_JOIN , FILTER , SORT , AGGREGATE ,
  TEMPTABLE_AGGREGATE , LIMIT_OFFSET , STREAM , MATERIALIZE ,
  MATERIALIZE_INFORMATION_SCHEMA_TABLE , APPEND , WINDOW , WEEDOUT ,
  REMOVE_DUPLICATES , REMOVE_DUPLICATES_ON_INDEX , ALTERNATIVE , CACHE_INVALIDATOR ,
  DELETE_ROWS , UPDATE_ROWS
}
 
enum  Safety : uint8_t { SAFE = 0 , SAFE_IF_SCANNED_ONCE = 1 , UNSAFE = 2 }
 A general enum to describe the safety of a given operation. More...
 

Public Member Functions

double cost () const
 
double init_cost () const
 
double first_row_cost () const
 The cost of reading the first row. More...
 
double init_once_cost () const
 
double cost_before_filter () const
 
void set_cost (double val)
 
void set_init_cost (double val)
 
void set_init_once_cost (double val)
 
void set_cost_before_filter (double val)
 
double rescan_cost () const
 Return the cost of scanning the given path for the second time (or later) in the given query block. More...
 
OverflowBitset & applied_sargable_join_predicates ()
 Bitmap of sargable join predicates that have already been applied in this access path by means of an index lookup (ref access), again referring to “predicates”, and thus should not be counted again for selectivity. More...
 
const OverflowBitset & applied_sargable_join_predicates () const
 
OverflowBitset & subsumed_sargable_join_predicates ()
 Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applied and will subsume the join predicate entirely, ie., not only should the selectivity not be double-counted, but the predicate itself is redundant and need not be applied as a filter. More...
 
const OverflowBitset & subsumed_sargable_join_predicates () const
 
auto & table_scan ()
 
const auto & table_scan () const
 
auto & sample_scan ()
 
const auto & sample_scan () const
 
auto & index_scan ()
 
const auto & index_scan () const
 
auto & index_distance_scan ()
 
const auto & index_distance_scan () const
 
auto & ref ()
 
const auto & ref () const
 
auto & ref_or_null ()
 
const auto & ref_or_null () const
 
auto & eq_ref ()
 
const auto & eq_ref () const
 
auto & pushed_join_ref ()
 
const auto & pushed_join_ref () const
 
auto & full_text_search ()
 
const auto & full_text_search () const
 
auto & const_table ()
 
const auto & const_table () const
 
auto & mrr ()
 
const auto & mrr () const
 
auto & follow_tail ()
 
const auto & follow_tail () const
 
auto & index_range_scan ()
 
const auto & index_range_scan () const
 
auto & index_merge ()
 
const auto & index_merge () const
 
auto & rowid_intersection ()
 
const auto & rowid_intersection () const
 
auto & rowid_union ()
 
const auto & rowid_union () const
 
auto & index_skip_scan ()
 
const auto & index_skip_scan () const
 
auto & group_index_skip_scan ()
 
const auto & group_index_skip_scan () const
 
auto & dynamic_index_range_scan ()
 
const auto & dynamic_index_range_scan () const
 
auto & materialized_table_function ()
 
const auto & materialized_table_function () const
 
auto & unqualified_count ()
 
const auto & unqualified_count () const
 
auto & table_value_constructor ()
 
const auto & table_value_constructor () const
 
auto & fake_single_row ()
 
const auto & fake_single_row () const
 
auto & zero_rows ()
 
const auto & zero_rows () const
 
auto & zero_rows_aggregated ()
 
const auto & zero_rows_aggregated () const
 
auto & hash_join ()
 
const auto & hash_join () const
 
auto & bka_join ()
 
const auto & bka_join () const
 
auto & nested_loop_join ()
 
const auto & nested_loop_join () const
 
auto & nested_loop_semijoin_with_duplicate_removal ()
 
const auto & nested_loop_semijoin_with_duplicate_removal () const
 
auto & filter ()
 
const auto & filter () const
 
auto & sort ()
 
const auto & sort () const
 
auto & aggregate ()
 
const auto & aggregate () const
 
auto & temptable_aggregate ()
 
const auto & temptable_aggregate () const
 
auto & limit_offset ()
 
const auto & limit_offset () const
 
auto & stream ()
 
const auto & stream () const
 
auto & materialize ()
 
const auto & materialize () const
 
auto & materialize_information_schema_table ()
 
const auto & materialize_information_schema_table () const
 
auto & append ()
 
const auto & append () const
 
auto & window ()
 
const auto & window () const
 
auto & weedout ()
 
const auto & weedout () const
 
auto & remove_duplicates ()
 
const auto & remove_duplicates () const
 
auto & remove_duplicates_on_index ()
 
const auto & remove_duplicates_on_index () const
 
auto & alternative ()
 
const auto & alternative () const
 
auto & cache_invalidator ()
 
const auto & cache_invalidator () const
 
auto & delete_rows ()
 
const auto & delete_rows () const
 
auto & update_rows ()
 
const auto & update_rows () const
 
double num_output_rows () const
 
void set_num_output_rows (double val)
 

Public Attributes

enum AccessPath::Type type
 
Safety safe_for_rowid = SAFE
 Whether it is safe to get row IDs (for sorting) from this access path. More...
 
bool count_examined_rows: 1 {false}
 Whether this access path counts as one that scans a base table, and thus should be counted towards examined_rows. More...
 
bool has_group_skip_scan: 1 {false}
 Whether this access path contains a GROUP_INDEX_SKIP_SCAN. More...
 
bool forced_by_dbug: 1 {false}
 Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x... statement. More...
 
int8_t immediate_update_delete_table {-1}
 For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from immediately as the rows are read from the iterator, if this path is only read from once. More...
 
int ordering_state = 0
 Which ordering the rows produced by this path follow, if any (see interesting_orders.h). More...
 
RowIterator * iterator = nullptr
 If an iterator has been instantiated for this access path, points to the iterator. More...
 
double num_output_rows_before_filter {kUnknownRowCount}
 If no filter, identical to num_output_rows. More...
 
OverflowBitset filter_predicates {0}
 Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” array internal to the join optimizer. More...
 
OverflowBitset delayed_predicates {0}
Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for instance because they reference other tables, or because we could not push them down into the nullable side of outer joins). More...
 
hypergraph::NodeMap parameter_tables {0}
 If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this access path are evaluated; that is, this access path must be put on the inner side of a nested-loop join (or multiple such joins) where the outer side includes all of the given tables. More...
 
void * secondary_engine_data {nullptr}
 Auxiliary data used by a secondary storage engine while processing the access path during optimization and execution. More...
 
TABLE * table
 
struct {
   TABLE *   table
 
}   table_scan
 
double sampling_percentage
 
enum tablesample_type sampling_type
 
struct {
   TABLE *   table
 
   double   sampling_percentage
 
   enum tablesample_type   sampling_type
 
}   sample_scan
 
int idx
 
bool use_order
 
bool reverse
 
struct {
   TABLE *   table
 
   int   idx
 
   bool   use_order
 
   bool   reverse
 
}   index_scan
 
QUICK_RANGE * range
 
struct {
   TABLE *   table
 
   int   idx
 
   QUICK_RANGE *   range
 
   bool   reverse
 
}   index_distance_scan
 
Index_lookup * ref
 
struct {
   TABLE *   table
 
   Index_lookup *   ref
 
   bool   use_order
 
   bool   reverse
 
}   ref
 
struct {
   TABLE *   table
 
   Index_lookup *   ref
 
   bool   use_order
 
}   ref_or_null
 
struct {
   TABLE *   table
 
   Index_lookup *   ref
 
}   eq_ref
 
bool is_unique
 
struct {
   TABLE *   table
 
   Index_lookup *   ref
 
   bool   use_order
 
   bool   is_unique
 
}   pushed_join_ref
 
bool use_limit
 
Item_func_match * ft_func
 
struct {
   TABLE *   table
 
   Index_lookup *   ref
 
   bool   use_order
 
   bool   use_limit
 
   Item_func_match *   ft_func
 
}   full_text_search
 
struct {
   TABLE *   table
 
   Index_lookup *   ref
 
}   const_table
 
AccessPath * bka_path
 
int mrr_flags
 
bool keep_current_rowid
 
struct {
   TABLE *   table
 
   Index_lookup *   ref
 
   AccessPath *   bka_path
 
   int   mrr_flags
 
   bool   keep_current_rowid
 
}   mrr
 
struct {
   TABLE *   table
 
}   follow_tail
 
KEY_PART * used_key_part
 
QUICK_RANGE ** ranges
 
unsigned num_ranges
 
unsigned mrr_flags
 
unsigned mrr_buf_size
 
unsigned index
 
unsigned num_used_key_parts
 
bool can_be_used_for_ror: 1
 
bool need_rows_in_rowid_order: 1
 
bool can_be_used_for_imerge: 1
 
bool reuse_handler: 1
 
bool geometry: 1
 
bool using_extended_key_parts: 1
 
struct {
   KEY_PART *   used_key_part
 
   QUICK_RANGE **   ranges
 
   unsigned   num_ranges
 
   unsigned   mrr_flags
 
   unsigned   mrr_buf_size
 
   unsigned   index
 
   unsigned   num_used_key_parts
 
   bool   can_be_used_for_ror: 1
 
   bool   need_rows_in_rowid_order: 1
 
   bool   can_be_used_for_imerge: 1
 
   bool   reuse_handler: 1
 
   bool   geometry: 1
 
   bool   reverse: 1
 
   bool   using_extended_key_parts: 1
 
}   index_range_scan
 
bool forced_by_hint
 
bool allow_clustered_primary_key_scan
 
Mem_root_array< AccessPath * > * children
 
struct {
   TABLE *   table
 
   bool   forced_by_hint
 
   bool   allow_clustered_primary_key_scan
 
   Mem_root_array< AccessPath * > *   children
 
}   index_merge
 
AccessPath * cpk_child
 
bool retrieve_full_rows
 
bool is_covering
 
struct {
   TABLE *   table
 
   Mem_root_array< AccessPath * > *   children
 
   AccessPath *   cpk_child
 
   bool   forced_by_hint
 
   bool   retrieve_full_rows
 
   bool   need_rows_in_rowid_order
 
   bool   reuse_handler
 
   bool   is_covering
 
}   rowid_intersection
 
struct {
   TABLE *   table
 
   Mem_root_array< AccessPath * > *   children
 
   bool   forced_by_hint
 
}   rowid_union
 
IndexSkipScanParameters * param
 
struct {
   TABLE *   table
 
   unsigned   index
 
   unsigned   num_used_key_parts
 
   bool   forced_by_hint
 
   IndexSkipScanParameters *   param
 
}   index_skip_scan
 
GroupIndexSkipScanParameters * param
 
struct {
   TABLE *   table
 
   unsigned   index
 
   unsigned   num_used_key_parts
 
   bool   forced_by_hint
 
   GroupIndexSkipScanParameters *   param
 
}   group_index_skip_scan
 
QEP_TAB * qep_tab
 
struct {
   TABLE *   table
 
   QEP_TAB *   qep_tab
 
}   dynamic_index_range_scan
 
Table_function * table_function
 
AccessPath * table_path
 
struct {
   TABLE *   table
 
   Table_function *   table_function
 
   AccessPath *   table_path
 
}   materialized_table_function
 
struct {
}   unqualified_count
 
Mem_root_array< Item_values_column * > * output_refs
 
struct {
   Mem_root_array< Item_values_column * > *   output_refs
 
}   table_value_constructor
 
struct {
}   fake_single_row
 
AccessPath * child
 
const char * cause
 
struct {
   AccessPath *   child
 
   const char *   cause
 
}   zero_rows
 
struct {
   const char *   cause
 
}   zero_rows_aggregated
 
AccessPath * outer
 
AccessPath * inner
 
const JoinPredicate * join_predicate
 
bool allow_spill_to_disk
 
bool store_rowids
 
bool rewrite_semi_to_inner
 
table_map tables_to_get_rowid_for
 
struct {
   AccessPath *   outer
 
   AccessPath *   inner
 
   const JoinPredicate *   join_predicate
 
   bool   allow_spill_to_disk
 
   bool   store_rowids
 
   bool   rewrite_semi_to_inner
 
   table_map   tables_to_get_rowid_for
 
}   hash_join
 
JoinType join_type
 
unsigned mrr_length_per_rec
 
float rec_per_key
 
struct {
   AccessPath *   outer
 
   AccessPath *   inner
 
   JoinType   join_type
 
   unsigned   mrr_length_per_rec
 
   float   rec_per_key
 
   bool   store_rowids
 
   table_map   tables_to_get_rowid_for
 
}   bka_join
 
bool pfs_batch_mode
 
bool already_expanded_predicates
 
OverflowBitset equijoin_predicates
 
struct {
   AccessPath *   outer
 
   AccessPath *   inner
 
   JoinType   join_type
 
   bool   pfs_batch_mode
 
   bool   already_expanded_predicates
 
   const JoinPredicate *   join_predicate
 
   OverflowBitset   equijoin_predicates
 
}   nested_loop_join
 
const TABLE * table
 
KEY * key
 
size_t key_len
 
struct {
   AccessPath *   outer
 
   AccessPath *   inner
 
   const TABLE *   table
 
   KEY *   key
 
   size_t   key_len
 
}   nested_loop_semijoin_with_duplicate_removal
 
Item * condition
 
bool materialize_subqueries
 
struct {
   AccessPath *   child
 
   Item *   condition
 
   bool   materialize_subqueries
 
}   filter
 
Filesort * filesort
 
ORDER * order
 
ha_rows limit
 
bool remove_duplicates
 
bool unwrap_rollup
 
bool force_sort_rowids
 
struct {
   AccessPath *   child
 
   Filesort *   filesort
 
   table_map   tables_to_get_rowid_for
 
   ORDER *   order
 
   ha_rows   limit
 
   bool   remove_duplicates
 
   bool   unwrap_rollup
 
   bool   force_sort_rowids
 
}   sort
 
olap_type olap
 
struct {
   AccessPath *   child
 
   olap_type   olap
 
}   aggregate
 
AccessPath * subquery_path
 
JOIN * join
 
Temp_table_param * temp_table_param
 
int ref_slice
 
struct {
   AccessPath *   subquery_path
 
   JOIN *   join
 
   Temp_table_param *   temp_table_param
 
   TABLE *   table
 
   AccessPath *   table_path
 
   int   ref_slice
 
}   temptable_aggregate
 
ha_rows offset
 
bool count_all_rows
 
bool reject_multiple_rows
 
ha_rows * send_records_override
 
struct {
   AccessPath *   child
 
   ha_rows   limit
 
   ha_rows   offset
 
   bool   count_all_rows
 
   bool   reject_multiple_rows
 
   ha_rows *   send_records_override
 
}   limit_offset
 
bool provide_rowid
 
struct {
   AccessPath *   child
 
   JOIN *   join
 
   Temp_table_param *   temp_table_param
 
   TABLE *   table
 
   bool   provide_rowid
 
   int   ref_slice
 
}   stream
 
MaterializePathParameters * param
 
double subquery_cost
 The total cost of executing the queries that we materialize. More...
 
double subquery_rows
 The number of materialized rows (as opposed to the number of rows fetched by table_path). More...
 
struct {
   AccessPath *   table_path
 
   MaterializePathParameters *   param
 
   double   subquery_cost
 The total cost of executing the queries that we materialize. More...
 
   double   subquery_rows
 The number of materialized rows (as opposed to the number of rows fetched by table_path). More...
 
}   materialize
 
Table_ref * table_list
 
struct {
   AccessPath *   table_path
 
   Table_ref *   table_list
 
   Item *   condition
 
}   materialize_information_schema_table
 
Mem_root_array< AppendPathParameters > * children
 
struct {
   Mem_root_array< AppendPathParameters > *   children
 
}   append
 
Window * window
 
TABLE * temp_table
 
bool needs_buffering
 
struct {
   AccessPath *   child
 
   Window *   window
 
   TABLE *   temp_table
 
   Temp_table_param *   temp_table_param
 
   int   ref_slice
 
   bool   needs_buffering
 
}   window
 
SJ_TMP_TABLE * weedout_table
 
struct {
   AccessPath *   child
 
   SJ_TMP_TABLE *   weedout_table
 
   table_map   tables_to_get_rowid_for
 
}   weedout
 
Item ** group_items
 
int group_items_size
 
struct {
   AccessPath *   child
 
   Item **   group_items
 
   int   group_items_size
 
}   remove_duplicates
 
unsigned loosescan_key_len
 
struct {
   AccessPath *   child
 
   TABLE *   table
 
   KEY *   key
 
   unsigned   loosescan_key_len
 
}   remove_duplicates_on_index
 
AccessPath * table_scan_path
 
Index_lookup * used_ref
 
struct {
   AccessPath *   table_scan_path
 
   AccessPath *   child
 
   Index_lookup *   used_ref
 
}   alternative
 
const char * name
 
struct {
   AccessPath *   child
 
   const char *   name
 
}   cache_invalidator
 
table_map tables_to_delete_from
 
table_map immediate_tables
 
struct {
   AccessPath *   child
 
   table_map   tables_to_delete_from
 
   table_map   immediate_tables
 
}   delete_rows
 
table_map tables_to_update
 
struct {
   AccessPath *   child
 
   table_map   tables_to_update
 
   table_map   immediate_tables
 
}   update_rows
 

Private Attributes

double m_num_output_rows {kUnknownRowCount}
 Expected number of output rows. More...
 
double m_cost {kUnknownCost}
 Expected cost to read all of this access path once. More...
 
double m_init_cost {kUnknownCost}
 Expected cost to initialize this access path; ie., cost to read k out of N rows would be init_cost + (k/N) * (cost - init_cost). More...
 
double m_init_once_cost {0.0}
 Of init_cost, how much of the initialization needs only to be done once per query block. More...
 
double m_cost_before_filter {kUnknownCost}
 If no filter, identical to cost. More...
 
union {
   struct {
      TABLE *   table
 
   }   table_scan
 
   struct {
      TABLE *   table
 
      double   sampling_percentage
 
      enum tablesample_type   sampling_type
 
   }   sample_scan
 
   struct {
      TABLE *   table
 
      int   idx
 
      bool   use_order
 
      bool   reverse
 
   }   index_scan
 
   struct {
      TABLE *   table
 
      int   idx
 
      QUICK_RANGE *   range
 
      bool   reverse
 
   }   index_distance_scan
 
   struct {
      TABLE *   table
 
      Index_lookup *   ref
 
      bool   use_order
 
      bool   reverse
 
   }   ref
 
   struct {
      TABLE *   table
 
      Index_lookup *   ref
 
      bool   use_order
 
   }   ref_or_null
 
   struct {
      TABLE *   table
 
      Index_lookup *   ref
 
   }   eq_ref
 
   struct {
      TABLE *   table
 
      Index_lookup *   ref
 
      bool   use_order
 
      bool   is_unique
 
   }   pushed_join_ref
 
   struct {
      TABLE *   table
 
      Index_lookup *   ref
 
      bool   use_order
 
      bool   use_limit
 
      Item_func_match *   ft_func
 
   }   full_text_search
 
   struct {
      TABLE *   table
 
      Index_lookup *   ref
 
   }   const_table
 
   struct {
      TABLE *   table
 
      Index_lookup *   ref
 
      AccessPath *   bka_path
 
      int   mrr_flags
 
      bool   keep_current_rowid
 
   }   mrr
 
   struct {
      TABLE *   table
 
   }   follow_tail
 
   struct {
      KEY_PART *   used_key_part
 
      QUICK_RANGE **   ranges
 
      unsigned   num_ranges
 
      unsigned   mrr_flags
 
      unsigned   mrr_buf_size
 
      unsigned   index
 
      unsigned   num_used_key_parts
 
      bool   can_be_used_for_ror: 1
 
      bool   need_rows_in_rowid_order: 1
 
      bool   can_be_used_for_imerge: 1
 
      bool   reuse_handler: 1
 
      bool   geometry: 1
 
      bool   reverse: 1
 
      bool   using_extended_key_parts: 1
 
   }   index_range_scan
 
   struct {
      TABLE *   table
 
      bool   forced_by_hint
 
      bool   allow_clustered_primary_key_scan
 
      Mem_root_array< AccessPath * > *   children
 
   }   index_merge
 
   struct {
      TABLE *   table
 
      Mem_root_array< AccessPath * > *   children
 
      AccessPath *   cpk_child
 
      bool   forced_by_hint
 
      bool   retrieve_full_rows
 
      bool   need_rows_in_rowid_order
 
      bool   reuse_handler
 
      bool   is_covering
 
   }   rowid_intersection
 
   struct {
      TABLE *   table
 
      Mem_root_array< AccessPath * > *   children
 
      bool   forced_by_hint
 
   }   rowid_union
 
   struct {
      TABLE *   table
 
      unsigned   index
 
      unsigned   num_used_key_parts
 
      bool   forced_by_hint
 
      IndexSkipScanParameters *   param
 
   }   index_skip_scan
 
   struct {
      TABLE *   table
 
      unsigned   index
 
      unsigned   num_used_key_parts
 
      bool   forced_by_hint
 
      GroupIndexSkipScanParameters *   param
 
   }   group_index_skip_scan
 
   struct {
      TABLE *   table
 
      QEP_TAB *   qep_tab
 
   }   dynamic_index_range_scan
 
   struct {
      TABLE *   table
 
      Table_function *   table_function
 
      AccessPath *   table_path
 
   }   materialized_table_function
 
   struct {
   }   unqualified_count
 
   struct {
      Mem_root_array< Item_values_column * > *   output_refs
 
   }   table_value_constructor
 
   struct {
   }   fake_single_row
 
   struct {
      AccessPath *   child
 
      const char *   cause
 
   }   zero_rows
 
   struct {
      const char *   cause
 
   }   zero_rows_aggregated
 
   struct {
      AccessPath *   outer
 
      AccessPath *   inner
 
      const JoinPredicate *   join_predicate
 
      bool   allow_spill_to_disk
 
      bool   store_rowids
 
      bool   rewrite_semi_to_inner
 
      table_map   tables_to_get_rowid_for
 
   }   hash_join
 
   struct {
      AccessPath *   outer
 
      AccessPath *   inner
 
      JoinType   join_type
 
      unsigned   mrr_length_per_rec
 
      float   rec_per_key
 
      bool   store_rowids
 
      table_map   tables_to_get_rowid_for
 
   }   bka_join
 
   struct {
      AccessPath *   outer
 
      AccessPath *   inner
 
      JoinType   join_type
 
      bool   pfs_batch_mode
 
      bool   already_expanded_predicates
 
      const JoinPredicate *   join_predicate
 
      OverflowBitset   equijoin_predicates
 
   }   nested_loop_join
 
   struct {
      AccessPath *   outer
 
      AccessPath *   inner
 
      const TABLE *   table
 
      KEY *   key
 
      size_t   key_len
 
   }   nested_loop_semijoin_with_duplicate_removal
 
   struct {
      AccessPath *   child
 
      Item *   condition
 
      bool   materialize_subqueries
 
   }   filter
 
   struct {
      AccessPath *   child
 
      Filesort *   filesort
 
      table_map   tables_to_get_rowid_for
 
      ORDER *   order
 
      ha_rows   limit
 
      bool   remove_duplicates
 
      bool   unwrap_rollup
 
      bool   force_sort_rowids
 
   }   sort
 
   struct {
      AccessPath *   child
 
      olap_type   olap
 
   }   aggregate
 
   struct {
      AccessPath *   subquery_path
 
      JOIN *   join
 
      Temp_table_param *   temp_table_param
 
      TABLE *   table
 
      AccessPath *   table_path
 
      int   ref_slice
 
   }   temptable_aggregate
 
   struct {
      AccessPath *   child
 
      ha_rows   limit
 
      ha_rows   offset
 
      bool   count_all_rows
 
      bool   reject_multiple_rows
 
      ha_rows *   send_records_override
 
   }   limit_offset
 
   struct {
      AccessPath *   child
 
      JOIN *   join
 
      Temp_table_param *   temp_table_param
 
      TABLE *   table
 
      bool   provide_rowid
 
      int   ref_slice
 
   }   stream
 
   struct {
      AccessPath *   table_path
 
      MaterializePathParameters *   param
 
      double   subquery_cost
 The total cost of executing the queries that we materialize. More...
 
      double   subquery_rows
 The number of materialized rows (as opposed to the number of rows fetched by table_path). More...
 
   }   materialize
 
   struct {
      AccessPath *   table_path
 
      Table_ref *   table_list
 
      Item *   condition
 
   }   materialize_information_schema_table
 
   struct {
      Mem_root_array< AppendPathParameters > *   children
 
   }   append
 
   struct {
      AccessPath *   child
 
      Window *   window
 
      TABLE *   temp_table
 
      Temp_table_param *   temp_table_param
 
      int   ref_slice
 
      bool   needs_buffering
 
   }   window
 
   struct {
      AccessPath *   child
 
      SJ_TMP_TABLE *   weedout_table
 
      table_map   tables_to_get_rowid_for
 
   }   weedout
 
   struct {
      AccessPath *   child
 
      Item **   group_items
 
      int   group_items_size
 
   }   remove_duplicates
 
   struct {
      AccessPath *   child
 
      TABLE *   table
 
      KEY *   key
 
      unsigned   loosescan_key_len
 
   }   remove_duplicates_on_index
 
   struct {
      AccessPath *   table_scan_path
 
      AccessPath *   child
 
      Index_lookup *   used_ref
 
   }   alternative
 
   struct {
      AccessPath *   child
 
      const char *   name
 
   }   cache_invalidator
 
   struct {
      AccessPath *   child
 
      table_map   tables_to_delete_from
 
      table_map   immediate_tables
 
   }   delete_rows
 
   struct {
      AccessPath *   child
 
      table_map   tables_to_update
 
      table_map   immediate_tables
 
   }   update_rows
 
}   u
 

Detailed Description

Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path contains pretty much exactly the information needed to instantiate the given iterator, plus some information that is only needed during planning, such as costs.

(The new join optimizer will extend this somewhat in the future. Some iterators also need the query block, ie., JOIN object, they are part of, but that is implicitly available when constructing the tree.)

AccessPath objects build on a variant, ie., they can hold an access path of any type (table scan, filter, hash join, sort, etc.), although only one at the same time. Currently, they contain 32 bytes of base information that is common to any access path (type identifier, costs, etc.), and then up to 40 bytes that are type-specific (e.g. for a table scan, the TABLE object). It would be nice if we could squeeze it down to 64 bytes and fit a cache line exactly, but it does not seem to be easy without fairly large contortions.

We could have solved this by inheritance, but the fixed-size design makes it possible to replace an access path when a better one is found, without introducing a new allocation, which will be important when using them as a planning structure.
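
A minimal sketch of how this design is used in practice (illustrative only, not a quote from the sources; the MEM_ROOT placement-new and the TABLE *table are assumed to be available in the surrounding code): set the type tag, fill in the matching union member through its type-specific accessor, and then set the planning information.

    // Construct a table-scan path and fill in planning data (sketch).
    AccessPath *path = new (mem_root) AccessPath;
    path->type = AccessPath::TABLE_SCAN;
    path->table_scan().table = table;   // stored in the union member u.table_scan
    path->set_num_output_rows(1000.0);  // estimated rows, normally from statistics
    path->set_init_cost(0.0);
    path->set_cost(25.0);

Because every AccessPath has the same fixed size, a cheaper candidate found later can be copied straight over an existing one (*path = *better) without a fresh allocation.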

Member Enumeration Documentation

◆ Safety

enum AccessPath::Safety : uint8_t

A general enum to describe the safety of a given operation.

Currently we only use this to describe row IDs, but it can easily be reused for safety of updating a table we're reading from (the Halloween problem), or just generally unreproducible results (e.g. a TABLESAMPLE changing due to external factors).

Less safe values have higher numerical values.

Enumerator
SAFE 

The given operation is always safe on this access path.

SAFE_IF_SCANNED_ONCE 

The given operation is safe if this access path is scanned once, but not if it's scanned multiple times (e.g. used on the inner side of a nested-loop join).

A typical example of this is a derived table or CTE that is rematerialized on each scan, so that references to the old values (such as row IDs) are no longer valid.

UNSAFE 

The given operation is unsafe on this access path, no matter how many or few times it's scanned.

Often, it may help to materialize it (assuming the materialization itself doesn't use the operation in question).
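
Since less safe values are numerically higher, combining the safety of several children reduces to taking a maximum; a sketch under that assumption (outer, inner, and the rescanned flag are hypothetical names, not from the sources):

    // The combined path is only as safe as its least safe input.
    path->safe_for_rowid =
        std::max(outer->safe_for_rowid, inner->safe_for_rowid);
    // If this subtree will be scanned more than once, "safe if scanned
    // once" degrades to unsafe.
    if (rescanned && path->safe_for_rowid == AccessPath::SAFE_IF_SCANNED_ONCE)
      path->safe_for_rowid = AccessPath::UNSAFE;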

◆ Type

enum AccessPath::Type : uint8_t
Enumerator
TABLE_SCAN 
SAMPLE_SCAN 
INDEX_SCAN 
INDEX_DISTANCE_SCAN 
REF 
REF_OR_NULL 
EQ_REF 
PUSHED_JOIN_REF 
FULL_TEXT_SEARCH 
CONST_TABLE 
MRR 
FOLLOW_TAIL 
INDEX_RANGE_SCAN 
INDEX_MERGE 
ROWID_INTERSECTION 
ROWID_UNION 
INDEX_SKIP_SCAN 
GROUP_INDEX_SKIP_SCAN 
DYNAMIC_INDEX_RANGE_SCAN 
TABLE_VALUE_CONSTRUCTOR 
FAKE_SINGLE_ROW 
ZERO_ROWS 
ZERO_ROWS_AGGREGATED 
MATERIALIZED_TABLE_FUNCTION 
UNQUALIFIED_COUNT 
NESTED_LOOP_JOIN 
NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL 
BKA_JOIN 
HASH_JOIN 
FILTER 
SORT 
AGGREGATE 
TEMPTABLE_AGGREGATE 
LIMIT_OFFSET 
STREAM 
MATERIALIZE 
MATERIALIZE_INFORMATION_SCHEMA_TABLE 
APPEND 
WINDOW 
WEEDOUT 
REMOVE_DUPLICATES 
REMOVE_DUPLICATES_ON_INDEX 
ALTERNATIVE 
CACHE_INVALIDATOR 
DELETE_ROWS 
UPDATE_ROWS 
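
Code that walks a path tree typically dispatches on this tag and then reads the matching type-specific accessor; a minimal sketch (the helper name is illustrative, and only a few of the many cases are shown):

    const TABLE *GetBaseTable(const AccessPath &path) {
      switch (path.type) {
        case AccessPath::TABLE_SCAN:
          return path.table_scan().table;
        case AccessPath::INDEX_SCAN:
          return path.index_scan().table;
        case AccessPath::EQ_REF:
          return path.eq_ref().table;
        default:
          return nullptr;  // many path types have no single base table
      }
    }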

Member Function Documentation

◆ aggregate() [1/2]

auto & AccessPath::aggregate ( )
inline

◆ aggregate() [2/2]

const auto & AccessPath::aggregate ( ) const
inline

◆ alternative() [1/2]

auto & AccessPath::alternative ( )
inline

◆ alternative() [2/2]

const auto & AccessPath::alternative ( ) const
inline

◆ append() [1/2]

auto & AccessPath::append ( )
inline

◆ append() [2/2]

const auto & AccessPath::append ( ) const
inline

◆ applied_sargable_join_predicates() [1/2]

OverflowBitset & AccessPath::applied_sargable_join_predicates ( )
inline

Bitmap of sargable join predicates that have already been applied in this access path by means of an index lookup (ref access), again referring to “predicates”, and thus should not be counted again for selectivity.

Note that the filter may need to be applied nevertheless (especially in case of type conversions); see subsumed_sargable_join_predicates.

Since these refer to the same array as filter_predicates, they will never overlap with filter_predicates, and so we can reuse the same memory using an alias (a union would not be allowed, since OverflowBitset is a class with non-trivial default constructor), even though the meaning is entirely separate. If N = num_where_predicates in the hypergraph, then bits 0..(N-1) belong to filter_predicates, and the rest to applied_sargable_join_predicates.
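
A sketch of that shared layout (IsBitSet is assumed to be the usual OverflowBitset test helper; N stands for num_where_predicates, and the index variables are illustrative):

    // Bits 0..N-1 of the shared bitset are filter_predicates; bits >= N
    // belong to applied_sargable_join_predicates, despite living in the
    // same storage.
    bool filtered_here = IsBitSet(where_idx, path.filter_predicates);  // where_idx < N
    bool applied_by_ref =
        IsBitSet(join_pred_idx, path.applied_sargable_join_predicates());  // join_pred_idx >= N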

◆ applied_sargable_join_predicates() [2/2]

const OverflowBitset & AccessPath::applied_sargable_join_predicates ( ) const
inline

◆ bka_join() [1/2]

auto & AccessPath::bka_join ( )
inline

◆ bka_join() [2/2]

const auto & AccessPath::bka_join ( ) const
inline

◆ cache_invalidator() [1/2]

auto & AccessPath::cache_invalidator ( )
inline

◆ cache_invalidator() [2/2]

const auto & AccessPath::cache_invalidator ( ) const
inline

◆ const_table() [1/2]

auto & AccessPath::const_table ( )
inline

◆ const_table() [2/2]

const auto & AccessPath::const_table ( ) const
inline

◆ cost()

double AccessPath::cost ( ) const
inline

◆ cost_before_filter()

double AccessPath::cost_before_filter ( ) const
inline

◆ delete_rows() [1/2]

auto & AccessPath::delete_rows ( )
inline

◆ delete_rows() [2/2]

const auto & AccessPath::delete_rows ( ) const
inline

◆ dynamic_index_range_scan() [1/2]

auto & AccessPath::dynamic_index_range_scan ( )
inline

◆ dynamic_index_range_scan() [2/2]

const auto & AccessPath::dynamic_index_range_scan ( ) const
inline

◆ eq_ref() [1/2]

auto & AccessPath::eq_ref ( )
inline

◆ eq_ref() [2/2]

const auto & AccessPath::eq_ref ( ) const
inline

◆ fake_single_row() [1/2]

auto & AccessPath::fake_single_row ( )
inline

◆ fake_single_row() [2/2]

const auto & AccessPath::fake_single_row ( ) const
inline

◆ filter() [1/2]

auto & AccessPath::filter ( )
inline

◆ filter() [2/2]

const auto & AccessPath::filter ( ) const
inline

◆ first_row_cost()

double AccessPath::first_row_cost ( ) const
inline

The cost of reading the first row.
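
Under the cost model used by this struct (reading k of N rows costs init_cost + (k/N) * (cost - init_cost)), the first-row cost corresponds roughly to k = 1; a sketch:

    double FirstRowCostEstimate(const AccessPath &p) {
      const double n = p.num_output_rows();
      if (n <= 1.0) return p.cost();  // a single output row costs the full read
      return p.init_cost() + (p.cost() - p.init_cost()) / n;
    }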

◆ follow_tail() [1/2]

auto & AccessPath::follow_tail ( )
inline

◆ follow_tail() [2/2]

const auto & AccessPath::follow_tail ( ) const
inline

◆ full_text_search() [1/2]

auto & AccessPath::full_text_search ( )
inline

◆ full_text_search() [2/2]

const auto & AccessPath::full_text_search ( ) const
inline

◆ group_index_skip_scan() [1/2]

auto & AccessPath::group_index_skip_scan ( )
inline

◆ group_index_skip_scan() [2/2]

const auto & AccessPath::group_index_skip_scan ( ) const
inline

◆ hash_join() [1/2]

auto & AccessPath::hash_join ( )
inline

◆ hash_join() [2/2]

const auto & AccessPath::hash_join ( ) const
inline

◆ index_distance_scan() [1/2]

auto & AccessPath::index_distance_scan ( )
inline

◆ index_distance_scan() [2/2]

const auto & AccessPath::index_distance_scan ( ) const
inline

◆ index_merge() [1/2]

auto & AccessPath::index_merge ( )
inline

◆ index_merge() [2/2]

const auto & AccessPath::index_merge ( ) const
inline

◆ index_range_scan() [1/2]

auto & AccessPath::index_range_scan ( )
inline

◆ index_range_scan() [2/2]

const auto & AccessPath::index_range_scan ( ) const
inline

◆ index_scan() [1/2]

auto & AccessPath::index_scan ( )
inline

◆ index_scan() [2/2]

const auto & AccessPath::index_scan ( ) const
inline

◆ index_skip_scan() [1/2]

auto & AccessPath::index_skip_scan ( )
inline

◆ index_skip_scan() [2/2]

const auto & AccessPath::index_skip_scan ( ) const
inline

◆ init_cost()

double AccessPath::init_cost ( ) const
inline

◆ init_once_cost()

double AccessPath::init_once_cost ( ) const
inline

◆ limit_offset() [1/2]

auto & AccessPath::limit_offset ( )
inline

◆ limit_offset() [2/2]

const auto & AccessPath::limit_offset ( ) const
inline

◆ materialize() [1/2]

auto & AccessPath::materialize ( )
inline

◆ materialize() [2/2]

const auto & AccessPath::materialize ( ) const
inline

◆ materialize_information_schema_table() [1/2]

auto & AccessPath::materialize_information_schema_table ( )
inline

◆ materialize_information_schema_table() [2/2]

const auto & AccessPath::materialize_information_schema_table ( ) const
inline

◆ materialized_table_function() [1/2]

auto & AccessPath::materialized_table_function ( )
inline

◆ materialized_table_function() [2/2]

const auto & AccessPath::materialized_table_function ( ) const
inline

◆ mrr() [1/2]

auto & AccessPath::mrr ( )
inline

◆ mrr() [2/2]

const auto & AccessPath::mrr ( ) const
inline

◆ nested_loop_join() [1/2]

auto & AccessPath::nested_loop_join ( )
inline

◆ nested_loop_join() [2/2]

const auto & AccessPath::nested_loop_join ( ) const
inline

◆ nested_loop_semijoin_with_duplicate_removal() [1/2]

auto & AccessPath::nested_loop_semijoin_with_duplicate_removal ( )
inline

◆ nested_loop_semijoin_with_duplicate_removal() [2/2]

const auto & AccessPath::nested_loop_semijoin_with_duplicate_removal ( ) const
inline

◆ num_output_rows()

double AccessPath::num_output_rows ( ) const
inline

◆ pushed_join_ref() [1/2]

auto & AccessPath::pushed_join_ref ( )
inline

◆ pushed_join_ref() [2/2]

const auto & AccessPath::pushed_join_ref ( ) const
inline

◆ ref() [1/2]

auto & AccessPath::ref ( )
inline

◆ ref() [2/2]

const auto & AccessPath::ref ( ) const
inline

◆ ref_or_null() [1/2]

auto & AccessPath::ref_or_null ( )
inline

◆ ref_or_null() [2/2]

const auto & AccessPath::ref_or_null ( ) const
inline

◆ remove_duplicates() [1/2]

auto & AccessPath::remove_duplicates ( )
inline

◆ remove_duplicates() [2/2]

const auto & AccessPath::remove_duplicates ( ) const
inline

◆ remove_duplicates_on_index() [1/2]

auto & AccessPath::remove_duplicates_on_index ( )
inline

◆ remove_duplicates_on_index() [2/2]

const auto & AccessPath::remove_duplicates_on_index ( ) const
inline

◆ rescan_cost()

double AccessPath::rescan_cost ( ) const
inline

Return the cost of scanning the given path for the second time (or later) in the given query block.

This is really the interesting metric, not init_once_cost in itself, but since nearly all paths have zero init_once_cost, storing that instead allows us to skip a lot of repeated path->init_once_cost = path->init_cost calls in the code.
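
In other words, the rescan cost is what remains after the once-per-query-block part of the initialization has already been paid; a sketch:

    double RescanCostEstimate(const AccessPath &p) {
      return p.cost() - p.init_once_cost();
    }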

◆ rowid_intersection() [1/2]

auto & AccessPath::rowid_intersection ( )
inline

◆ rowid_intersection() [2/2]

const auto & AccessPath::rowid_intersection ( ) const
inline

◆ rowid_union() [1/2]

auto & AccessPath::rowid_union ( )
inline

◆ rowid_union() [2/2]

const auto & AccessPath::rowid_union ( ) const
inline

◆ sample_scan() [1/2]

auto & AccessPath::sample_scan ( )
inline

◆ sample_scan() [2/2]

const auto & AccessPath::sample_scan ( ) const
inline

◆ set_cost()

void AccessPath::set_cost ( double  val)
inline

◆ set_cost_before_filter()

void AccessPath::set_cost_before_filter ( double  val)
inline

◆ set_init_cost()

void AccessPath::set_init_cost ( double  val)
inline

◆ set_init_once_cost()

void AccessPath::set_init_once_cost ( double  val)
inline

◆ set_num_output_rows()

void AccessPath::set_num_output_rows ( double  val)
inline

◆ sort() [1/2]

auto & AccessPath::sort ( )
inline

◆ sort() [2/2]

const auto & AccessPath::sort ( ) const
inline

◆ stream() [1/2]

auto & AccessPath::stream ( )
inline

◆ stream() [2/2]

const auto & AccessPath::stream ( ) const
inline

◆ subsumed_sargable_join_predicates() [1/2]

OverflowBitset & AccessPath::subsumed_sargable_join_predicates ( )
inline

Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applied and will subsume the join predicate entirely, ie., not only should the selectivity not be double-counted, but the predicate itself is redundant and need not be applied as a filter.

(It is an error to have a bit set here but not in applied_sargable_join_predicates.)
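
That invariant can be spelled out as a debug check; a sketch (IsBitSet is assumed to be the OverflowBitset test helper):

    void CheckSubsumedImpliesApplied(const AccessPath &p, int num_bits) {
      for (int i = 0; i < num_bits; ++i) {
        if (IsBitSet(i, p.subsumed_sargable_join_predicates()))
          assert(IsBitSet(i, p.applied_sargable_join_predicates()));
      }
    }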

◆ subsumed_sargable_join_predicates() [2/2]

const OverflowBitset & AccessPath::subsumed_sargable_join_predicates ( ) const
inline

◆ table_scan() [1/2]

auto & AccessPath::table_scan ( )
inline

◆ table_scan() [2/2]

const auto & AccessPath::table_scan ( ) const
inline

◆ table_value_constructor() [1/2]

auto & AccessPath::table_value_constructor ( )
inline

◆ table_value_constructor() [2/2]

const auto & AccessPath::table_value_constructor ( ) const
inline

◆ temptable_aggregate() [1/2]

auto & AccessPath::temptable_aggregate ( )
inline

◆ temptable_aggregate() [2/2]

const auto & AccessPath::temptable_aggregate ( ) const
inline

◆ unqualified_count() [1/2]

auto & AccessPath::unqualified_count ( )
inline

◆ unqualified_count() [2/2]

const auto & AccessPath::unqualified_count ( ) const
inline

◆ update_rows() [1/2]

auto & AccessPath::update_rows ( )
inline

◆ update_rows() [2/2]

const auto & AccessPath::update_rows ( ) const
inline

◆ weedout() [1/2]

auto & AccessPath::weedout ( )
inline

◆ weedout() [2/2]

const auto & AccessPath::weedout ( ) const
inline

◆ window() [1/2]

auto & AccessPath::window ( )
inline

◆ window() [2/2]

const auto & AccessPath::window ( ) const
inline

◆ zero_rows() [1/2]

auto & AccessPath::zero_rows ( )
inline

◆ zero_rows() [2/2]

const auto & AccessPath::zero_rows ( ) const
inline

◆ zero_rows_aggregated() [1/2]

auto & AccessPath::zero_rows_aggregated ( )
inline

◆ zero_rows_aggregated() [2/2]

const auto & AccessPath::zero_rows_aggregated ( ) const
inline

Member Data Documentation

◆ 

struct { ... } AccessPath::aggregate

◆ allow_clustered_primary_key_scan

bool AccessPath::allow_clustered_primary_key_scan

◆ allow_spill_to_disk

bool AccessPath::allow_spill_to_disk

◆ already_expanded_predicates

bool AccessPath::already_expanded_predicates

◆ 

struct { ... } AccessPath::alternative

◆ 

struct { ... } AccessPath::append

◆ 

struct { ... } AccessPath::bka_join

◆ bka_path

AccessPath* AccessPath::bka_path

◆ 

struct { ... } AccessPath::cache_invalidator

◆ can_be_used_for_imerge

bool AccessPath::can_be_used_for_imerge

◆ can_be_used_for_ror

bool AccessPath::can_be_used_for_ror

◆ cause

const char* AccessPath::cause

◆ child

AccessPath* AccessPath::child

◆ children [1/2]

Mem_root_array<AccessPath *>* AccessPath::children

◆ children [2/2]

Mem_root_array<AppendPathParameters>* AccessPath::children

◆ condition

Item* AccessPath::condition

◆ 

struct { ... } AccessPath::const_table

◆ count_all_rows

bool AccessPath::count_all_rows

◆ count_examined_rows

bool AccessPath::count_examined_rows

Whether this access path counts as one that scans a base table, and thus should be counted towards examined_rows.

It can sometimes seem a bit arbitrary which iterators count towards examined_rows and which ones do not, so the only canonical reference is the tests.

◆ cpk_child

AccessPath* AccessPath::cpk_child

◆ delayed_predicates

OverflowBitset AccessPath::delayed_predicates {0}

Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for instance because they reference other tables, or because we could not push them down into the nullable side of outer joins).

Used during planning only (see filter_predicates).

◆ 

struct { ... } AccessPath::delete_rows

◆ 

struct { ... } AccessPath::dynamic_index_range_scan

◆ 

struct { ... } AccessPath::eq_ref

◆ equijoin_predicates

OverflowBitset AccessPath::equijoin_predicates

◆ 

struct { ... } AccessPath::fake_single_row

◆ filesort

Filesort* AccessPath::filesort

◆ 

struct { ... } AccessPath::filter

◆ filter_predicates

OverflowBitset AccessPath::filter_predicates {0}

Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” array internal to the join optimizer.

Since bit masks are much cheaper to deal with than creating Item objects, and we don't invent new conditions during join optimization (all of them are known when we begin optimization), we stick to manipulating bit masks during optimization, saying which filters will be applied at this node (a 1-bit means the filter will be applied here; if there are multiple ones, they are ANDed together).

This is used during join optimization only; before iterators are created, we will add FILTER access paths to represent these instead, removing the dependency on the array. Said FILTER paths are by convention created with materialize_subqueries = false, since the by far most common case is that there are no subqueries in the predicate. In other words, if you wish to represent a filter with materialize_subqueries = true, you will need to make an explicit FILTER node.

See also nested_loop_join().equijoin_predicates, which is for filters being applied before nested-loop joins, but is otherwise the same idea.
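
An illustrative sketch of the expansion step described above (the helper name and the shape of the predicates array are assumptions, not the real optimizer entry points): each set bit contributes its condition to an ANDed condition on an explicit FILTER path placed on top of the child.

    AccessPath *ExpandFilterBits(MEM_ROOT *mem_root, AccessPath *child,
                                 Bounds_checked_array<Predicate> predicates) {
      Item *condition = nullptr;
      for (int idx : BitsSetIn(child->filter_predicates)) {
        condition = (condition == nullptr)
                        ? predicates[idx].condition
                        : new Item_cond_and(condition, predicates[idx].condition);
      }
      if (condition == nullptr) return child;  // no filters to apply here
      AccessPath *filter_path = new (mem_root) AccessPath;
      filter_path->type = AccessPath::FILTER;
      filter_path->filter().child = child;
      filter_path->filter().condition = condition;
      filter_path->filter().materialize_subqueries = false;  // the common case
      // (Cost and row estimates for the new node omitted in this sketch.)
      return filter_path;
    }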

◆ 

struct { ... } AccessPath::follow_tail

◆ force_sort_rowids

bool AccessPath::force_sort_rowids

◆ forced_by_dbug

bool AccessPath::forced_by_dbug

Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x... statement.

◆ forced_by_hint

bool AccessPath::forced_by_hint

◆ ft_func

Item_func_match* AccessPath::ft_func

◆ 

struct { ... } AccessPath::full_text_search

◆ geometry

bool AccessPath::geometry

◆ 

struct { ... } AccessPath::group_index_skip_scan

◆ group_items

Item** AccessPath::group_items

◆ group_items_size

int AccessPath::group_items_size

◆ has_group_skip_scan

bool AccessPath::has_group_skip_scan

Whether this access path contains a GROUP_INDEX_SKIP_SCAN.

◆ 

struct { ... } AccessPath::hash_join

◆ idx

int AccessPath::idx

◆ immediate_tables

table_map AccessPath::immediate_tables

◆ immediate_update_delete_table

int8_t AccessPath::immediate_update_delete_table {-1}

For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from immediately as the rows are read from the iterator, if this path is only read from once.

-1 if there is no such table in this path.

Note that this is an index into CostingReceiver's array of nodes, and is not necessarily equal to the table number within the query block given by Table_ref::tableno().

The table, if any, is currently always the outermost table in the path.

It is possible to have plans where it would be safe to operate "immediately" on more than one table. For example, if we do a merge join, it is safe to perform immediate deletes on tables on the inner side of the join, since both sides are read only once. (However, we currently do not support merge joins.)

Another possibility is when the outer table of a nested loop join is guaranteed to return at most one row (typically, a unique index lookup aka. eq_ref). Then it's safe to delete immediately from both sides of the nested loop join. But we don't do this yet.

Hash joins read both sides exactly once. However, with hash joins, the scans on the inner tables are not positioned on the correct row when the result of the join is returned, so the immediate delete logic will need to be changed to reposition the underlying scans before doing the immediate deletes. While this can be done, it makes the benefit of immediate deletes less obvious for these tables, and it can also be a loss in some cases, because we lose the deduplication provided by the Unique object used for buffered deletes (the immediate deletes could end up spending time repositioning to already deleted rows). So we currently don't attempt to do immediate deletes from inner tables of hash joins either.

The outer table of a hash join can be deleted from immediately if the inner table fits in memory. If the hash join spills to disk, though, neither the rows of the outer table nor the rows of the inner table come out in the order of the underlying scan, so it is not safe in general to perform immediate deletes on the outer table of a hash join.

If support for immediate operations on multiple tables is added, this member could be changed from a node index to a NodeMap.

◆ index

unsigned AccessPath::index

◆ 

struct { ... } AccessPath::index_distance_scan

◆ 

struct { ... } AccessPath::index_merge

◆ 

struct { ... } AccessPath::index_range_scan

◆ 

struct { ... } AccessPath::index_scan

◆ 

struct { ... } AccessPath::index_skip_scan

◆ inner

AccessPath * AccessPath::inner

◆ is_covering

bool AccessPath::is_covering

◆ is_unique

bool AccessPath::is_unique

◆ iterator

RowIterator* AccessPath::iterator = nullptr

If an iterator has been instantiated for this access path, points to the iterator.

Used for constructing iterators that need to talk to each other (e.g. for recursive CTEs, or BKA join), and also for locating timing information in EXPLAIN ANALYZE queries.

◆ join

JOIN* AccessPath::join

◆ join_predicate

const JoinPredicate* AccessPath::join_predicate

◆ join_type

JoinType AccessPath::join_type

◆ keep_current_rowid

bool AccessPath::keep_current_rowid

◆ key

KEY* AccessPath::key

◆ key_len

size_t AccessPath::key_len

◆ limit

ha_rows AccessPath::limit

◆ 

struct { ... } AccessPath::limit_offset

◆ loosescan_key_len

unsigned AccessPath::loosescan_key_len

◆ m_cost

double AccessPath::m_cost {kUnknownCost}
private

Expected cost to read all of this access path once.

◆ m_cost_before_filter

double AccessPath::m_cost_before_filter {kUnknownCost}
private

If no filter, identical to cost.

init_cost is always the same (filters have zero initialization cost).

◆ m_init_cost

double AccessPath::m_init_cost {kUnknownCost}
private

Expected cost to initialize this access path; ie., cost to read k out of N rows would be init_cost + (k/N) * (cost - init_cost).

Note that EXPLAIN prints out the cost of reading the first row because it is easier for the user and also easier to measure in EXPLAIN ANALYZE, but it is easier to do calculations with a pure initialization cost, so that is what we use in this member. kUnknownCost for unknown.
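
Written out, the partial-read model above looks like this (a sketch; kUnknownCost handling omitted):

    // Cost of reading k of the path's N output rows under the linear model.
    double PartialReadCost(const AccessPath &p, double k) {
      const double n = p.num_output_rows();
      return p.init_cost() + (k / n) * (p.cost() - p.init_cost());
    }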

◆ m_init_once_cost

double AccessPath::m_init_once_cost {0.0}
private

Of init_cost, how much of the initialization needs only to be done once per query block.

(This is a cost, not a proportion.) Ie., if the access path can reuse some of its initialization work when Init() is called multiple times, this member will be nonzero. A typical example is a materialized table with rematerialize=false; the second time Init() is called, it's a no-op. Most paths will have init_once_cost = 0.0, ie., repeated scans will cost the same. We do not intend to use this field to model cache effects.

This is currently not printed in EXPLAIN, only optimizer trace.

◆ m_num_output_rows

double AccessPath::m_num_output_rows {kUnknownRowCount}
private

Expected number of output rows.

◆ 

struct { ... } AccessPath::materialize

◆ 

struct { ... } AccessPath::materialize_information_schema_table

◆ materialize_subqueries

bool AccessPath::materialize_subqueries

◆ 

struct { ... } AccessPath::materialized_table_function

◆ 

struct { ... } AccessPath::mrr

◆ mrr_buf_size

unsigned AccessPath::mrr_buf_size

◆ mrr_flags [1/2]

int AccessPath::mrr_flags

◆ mrr_flags [2/2]

unsigned AccessPath::mrr_flags

◆ mrr_length_per_rec

unsigned AccessPath::mrr_length_per_rec

◆ name

const char* AccessPath::name

◆ need_rows_in_rowid_order

bool AccessPath::need_rows_in_rowid_order

◆ needs_buffering

bool AccessPath::needs_buffering

◆ 

struct { ... } AccessPath::nested_loop_join
Initial value:
= {nullptr, nullptr, JoinType::INNER, false, false,
nullptr, {}}

◆ 

struct { ... } AccessPath::nested_loop_semijoin_with_duplicate_removal

◆ num_output_rows_before_filter

double AccessPath::num_output_rows_before_filter {kUnknownRowCount}

If no filter, identical to num_output_rows.

◆ num_ranges

unsigned AccessPath::num_ranges

◆ num_used_key_parts

unsigned AccessPath::num_used_key_parts

◆ offset

ha_rows AccessPath::offset

◆ olap

olap_type AccessPath::olap

◆ order

ORDER* AccessPath::order

◆ ordering_state

int AccessPath::ordering_state = 0

Which ordering the rows produced by this path follow, if any (see interesting_orders.h).

This is really a LogicalOrderings::StateIndex, but we don't want to add a dependency on interesting_orders.h from this file, so we use the base type instead of the typedef here.

◆ outer

AccessPath* AccessPath::outer

◆ output_refs

Mem_root_array<Item_values_column *>* AccessPath::output_refs

◆ param [1/3]

IndexSkipScanParameters* AccessPath::param

◆ param [2/3]

GroupIndexSkipScanParameters* AccessPath::param

◆ param [3/3]

MaterializePathParameters* AccessPath::param

◆ parameter_tables

hypergraph::NodeMap AccessPath::parameter_tables {0}

If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this access path are evaluated; that is, this access path must be put on the inner side of a nested-loop join (or multiple such joins) where the outer side includes all of the given tables.

The most obvious case for this is dependent tables in LATERAL, but a more common case is when we have pushed join conditions referring to those tables; e.g., if this access path represents t1 and we have a condition t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will be set in this bitmap. We can still join in other tables, deferring t2, but the bit(s) will then propagate, and we cannot be on the right side of a hash join until parameter_tables is zero again. (Also see DisallowParameterizedJoinPath() for when we disallow such deferring, as an optimization.)

As a special case, we allow setting RAND_TABLE_BIT, even though it is normally part of a table_map, not a NodeMap. In this case, it specifies that the access path is entirely noncachable, because it depends on something nondeterministic or an outer reference, and thus can never be on the right side of a hash join, ever.
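
A sketch of the constraint this member imposes (the RAND_TABLE_BIT special case is covered by the same test, since any nonzero value disqualifies the path):

    // A path may only be placed on the right side of a hash join once all
    // of its parameter tables have been provided by nested-loop joins
    // surrounding it.
    bool CanBeOnRightSideOfHashJoin(const AccessPath &p) {
      return p.parameter_tables == 0;
    }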

◆ pfs_batch_mode

bool AccessPath::pfs_batch_mode

◆ provide_rowid

bool AccessPath::provide_rowid

◆ 

struct { ... } AccessPath::pushed_join_ref

◆ qep_tab

QEP_TAB* AccessPath::qep_tab

◆ range

QUICK_RANGE* AccessPath::range

◆ ranges

QUICK_RANGE** AccessPath::ranges

◆ rec_per_key

float AccessPath::rec_per_key

◆ ref [1/2]

Index_lookup* AccessPath::ref

◆ ref [2/2]

struct { ... } AccessPath::ref

◆ 

struct { ... } AccessPath::ref_or_null

◆ ref_slice

int AccessPath::ref_slice

◆ reject_multiple_rows

bool AccessPath::reject_multiple_rows

◆ remove_duplicates [1/2]

bool AccessPath::remove_duplicates

◆ remove_duplicates [2/2]

struct { ... } AccessPath::remove_duplicates

◆ 

struct { ... } AccessPath::remove_duplicates_on_index

◆ retrieve_full_rows

bool AccessPath::retrieve_full_rows

◆ reuse_handler

bool AccessPath::reuse_handler

◆ reverse

bool AccessPath::reverse

◆ rewrite_semi_to_inner

bool AccessPath::rewrite_semi_to_inner

◆ 

struct { ... } AccessPath::rowid_intersection

◆ 

struct { ... } AccessPath::rowid_union

◆ safe_for_rowid

Safety AccessPath::safe_for_rowid = SAFE

Whether it is safe to get row IDs (for sorting) from this access path.

◆ 

struct { ... } AccessPath::sample_scan

◆ sampling_percentage

double AccessPath::sampling_percentage

◆ sampling_type

enum tablesample_type AccessPath::sampling_type

◆ secondary_engine_data

void* AccessPath::secondary_engine_data {nullptr}

Auxiliary data used by a secondary storage engine while processing the access path during optimization and execution.

The secondary storage engine is free to store any useful information in this member, for example extra statistics or cost estimates. The data pointed to is fully owned by the secondary storage engine, and it is the responsibility of the secondary engine to manage the memory and make sure it is properly destroyed.

◆ send_records_override

ha_rows* AccessPath::send_records_override

◆ 

struct { ... } AccessPath::sort

◆ store_rowids

bool AccessPath::store_rowids

◆ 

struct { ... } AccessPath::stream

◆ subquery_cost

double AccessPath::subquery_cost

The total cost of executing the queries that we materialize.

◆ subquery_path

AccessPath* AccessPath::subquery_path

◆ subquery_rows

double AccessPath::subquery_rows

The number of materialized rows (as opposed to the number of rows fetched by table_path).

Needed for 'explain'.

◆ table [1/2]

TABLE* AccessPath::table

◆ table [2/2]

const TABLE* AccessPath::table

◆ table_function

Table_function* AccessPath::table_function

◆ table_list

Table_ref* AccessPath::table_list

◆ table_path

AccessPath* AccessPath::table_path

◆ 

struct { ... } AccessPath::table_scan

◆ table_scan_path

AccessPath* AccessPath::table_scan_path

◆ 

struct { ... } AccessPath::table_value_constructor

◆ tables_to_delete_from

table_map AccessPath::tables_to_delete_from

◆ tables_to_get_rowid_for

table_map AccessPath::tables_to_get_rowid_for

◆ tables_to_update

table_map AccessPath::tables_to_update

◆ temp_table

TABLE* AccessPath::temp_table

◆ temp_table_param

Temp_table_param* AccessPath::temp_table_param

◆ 

struct { ... } AccessPath::temptable_aggregate

◆ type

enum AccessPath::Type AccessPath::type

◆ 

union { ... } AccessPath::u

◆ 

struct { ... } AccessPath::unqualified_count

◆ unwrap_rollup

bool AccessPath::unwrap_rollup

◆ 

struct { ... } AccessPath::update_rows

◆ use_limit

bool AccessPath::use_limit

◆ use_order

bool AccessPath::use_order

◆ used_key_part

KEY_PART* AccessPath::used_key_part

◆ used_ref

Index_lookup* AccessPath::used_ref

◆ using_extended_key_parts

bool AccessPath::using_extended_key_parts

◆ 

struct { ... } AccessPath::weedout

◆ weedout_table

SJ_TMP_TABLE* AccessPath::weedout_table

◆ window [1/2]

Window* AccessPath::window

◆ window [2/2]

struct { ... } AccessPath::window

◆ 

struct { ... } AccessPath::zero_rows

◆ 

struct { ... } AccessPath::zero_rows_aggregated

The documentation for this struct was generated from the following file:

access_path.h