// MySQL 8.3.0 — Source Code Documentation extract
// path_helpers.h
/* Copyright (c) 2021, 2023, Oracle and/or its affiliates.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation. The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
22
#ifndef SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_
#define SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_

/**
  @file
  Various small helpers to abstract over the fact that AccessPath can contain
  a number of different range scan types.
 */

// NOTE(review): the original #include directives (doxygen lines 32-38) were
// lost in the documentation extraction — restore them from the upstream
// source tree before compiling.
40inline bool is_loose_index_scan(const AccessPath *path) {
41 return path->type == AccessPath::INDEX_SKIP_SCAN ||
43}
44
46 switch (path->type) {
48 return path->index_skip_scan().param->has_aggregate_function;
49 break;
51 return path->group_index_skip_scan().param->have_agg_distinct;
52 break;
53 default:
54 return false;
55 }
56}
57
58/**
59 Whether the range access method is capable of returning records
60 in reverse order.
61 */
63 return path->type == AccessPath::INDEX_RANGE_SCAN;
64}
65
66/**
67 Whether the access path is an INDEX_RANGE_SCAN that returns rows in reverse
68 order. (Note that non-range index scans return false here.)
69 */
71 return path->type == AccessPath::INDEX_RANGE_SCAN &&
72 path->index_range_scan().reverse;
73}
74
75/**
76 Ask the AccessPath to reverse itself; returns false if successful.
77 Overridden only in INDEX_RANGE_SCAN.
78 */
79inline bool make_reverse(uint used_key_parts, AccessPath *path) {
81 if (path->index_range_scan().geometry) {
82 return true;
83 }
84 path->index_range_scan().reverse = true;
85 TABLE *table = path->index_range_scan().used_key_part[0].field->table;
86 path->index_range_scan().using_extended_key_parts =
87 (used_key_parts > table->key_info[path->index_range_scan().index]
88 .user_defined_key_parts);
89 return false;
90 } else {
91 return true;
92 }
93}
94
96 switch (path->type) {
98 path->index_range_scan().mrr_flags |= HA_MRR_SORTED;
99 break;
102 // Always sorted already.
103 break;
104 default:
105 assert(false);
106 }
107}
108
109/**
110 If this is an index range scan, and that range scan uses a single
111 index, returns the index used. Otherwise, MAX_KEY.
112 */
113inline unsigned used_index(const AccessPath *path) {
114 switch (path->type) {
116 return path->index_range_scan().index;
118 return path->index_skip_scan().index;
120 return path->group_index_skip_scan().index;
121 default:
122 return MAX_KEY;
123 }
124}
125
126/**
127 Return true if there is only one range and this uses the whole unique key.
128 */
129inline bool unique_key_range(const AccessPath *path) {
130 if (path->type != AccessPath::INDEX_RANGE_SCAN) {
131 return false;
132 }
133 if (path->index_range_scan().num_ranges == 1) {
134 QUICK_RANGE *tmp = path->index_range_scan().ranges[0];
135 if ((tmp->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) {
136 KEY *key =
137 path->index_range_scan().used_key_part[0].field->table->key_info +
138 path->index_range_scan().index;
139 return (key->flags & HA_NOSAME) && key->key_length == tmp->min_length;
140 }
141 }
142 return false;
143}
144
145inline void get_fields_used(const AccessPath *path, MY_BITMAP *used_fields) {
146 switch (path->type) {
148 for (uint i = 0; i < path->index_range_scan().num_used_key_parts; ++i) {
150 used_fields,
151 path->index_range_scan().used_key_part[i].field->field_index());
152 }
153 break;
155 for (AccessPath *child : *path->index_merge().children) {
156 get_fields_used(child, used_fields);
157 }
158 break;
160 for (AccessPath *child : *path->rowid_intersection().children) {
161 get_fields_used(child, used_fields);
162 }
163 if (path->rowid_intersection().cpk_child != nullptr) {
164 get_fields_used(path->rowid_intersection().cpk_child, used_fields);
165 }
166 break;
168 for (AccessPath *child : *path->rowid_union().children) {
169 get_fields_used(child, used_fields);
170 }
171 break;
173 for (uint i = 0; i < path->index_skip_scan().num_used_key_parts; ++i) {
174 bitmap_set_bit(used_fields, path->index_skip_scan()
175 .param->index_info->key_part[i]
176 .field->field_index());
177 }
178 break;
180 for (uint i = 0; i < path->group_index_skip_scan().num_used_key_parts;
181 ++i) {
182 bitmap_set_bit(used_fields, path->group_index_skip_scan()
183 .param->index_info->key_part[i]
184 .field->field_index());
185 }
186 break;
187 default:
188 assert(false);
189 }
190}
191
192inline unsigned get_used_key_parts(const AccessPath *path) {
193 switch (path->type) {
195 return path->index_range_scan().num_used_key_parts;
197 return path->index_skip_scan().num_used_key_parts;
199 return path->group_index_skip_scan().num_used_key_parts;
203 return 0;
204 default:
205 assert(false);
206 return 0;
207 }
208}
209
210/**
211 Return whether any index used by this range scan uses the field(s)
212 marked in passed bitmap. Assert-fails if not a range scan.
213 */
215 const MY_BITMAP *fields) {
216 switch (path->type) {
218 return is_key_used(path->index_range_scan().used_key_part[0].field->table,
219 path->index_range_scan().index, fields);
221 for (AccessPath *child : *path->index_merge().children) {
222 if (uses_index_on_fields(child, fields)) {
223 return true;
224 }
225 }
226 return false;
228 for (AccessPath *child : *path->rowid_intersection().children) {
229 if (uses_index_on_fields(child, fields)) {
230 return true;
231 }
232 }
233 return path->rowid_intersection().cpk_child != nullptr &&
234 uses_index_on_fields(path->rowid_intersection().cpk_child, fields);
236 for (AccessPath *child : *path->rowid_union().children) {
237 if (uses_index_on_fields(child, fields)) {
238 return true;
239 }
240 }
241 return false;
243 return is_key_used(path->index_skip_scan().table,
244 path->index_skip_scan().index, fields);
246 return is_key_used(path->group_index_skip_scan().table,
247 path->group_index_skip_scan().index, fields);
248 default:
249 assert(false);
250 return false;
251 }
252}
253
254/**
255 Get the total length of first used_key_parts parts of the key,
256 in bytes. Only applicable for range access types that use a single
257 index (others will assert-fail).
258 */
259inline unsigned get_max_used_key_length(const AccessPath *path) {
260 switch (path->type) {
262 int max_used_key_length = 0;
263 Bounds_checked_array ranges{path->index_range_scan().ranges,
264 path->index_range_scan().num_ranges};
265 for (const QUICK_RANGE *range : ranges) {
266 max_used_key_length =
267 std::max<int>(max_used_key_length, range->min_length);
268 max_used_key_length =
269 std::max<int>(max_used_key_length, range->max_length);
270 }
271 return max_used_key_length;
272 }
274 int max_used_key_length = 0;
275 KEY_PART_INFO *p = path->index_skip_scan().param->index_info->key_part;
276 for (uint i = 0; i < path->index_skip_scan().num_used_key_parts;
277 i++, p++) {
278 max_used_key_length += p->store_length;
279 }
280 return max_used_key_length;
281 }
283 return path->group_index_skip_scan().param->max_used_key_length;
284 default:
285 assert(false);
286 return 0;
287 }
288}
289
290/*
291 Append text representation of the range scan (what and how is
292 merged) to str. The result is added to "Extra" field in EXPLAIN output.
293 */
294inline void add_info_string(const AccessPath *path, String *str) {
295 switch (path->type) {
297 TABLE *table = path->index_range_scan().used_key_part[0].field->table;
298 KEY *key_info = table->key_info + path->index_range_scan().index;
299 str->append(key_info->name);
300 break;
301 }
303 bool first = true;
304 TABLE *table = path->index_merge().table;
305 str->append(STRING_WITH_LEN("sort_union("));
306
307 // For EXPLAIN compatibility with older versions, PRIMARY is always
308 // printed last.
309 for (bool print_primary : {false, true}) {
310 for (AccessPath *child : *path->index_merge().children) {
311 const bool is_primary = table->file->primary_key_is_clustered() &&
312 used_index(child) == table->s->primary_key;
313 if (is_primary != print_primary) continue;
314 if (!first)
315 str->append(',');
316 else
317 first = false;
318 ::add_info_string(child, str);
319 }
320 }
321 str->append(')');
322 break;
323 }
325 bool first = true;
326 str->append(STRING_WITH_LEN("intersect("));
327 for (AccessPath *current : *path->rowid_intersection().children) {
328 if (!first)
329 str->append(',');
330 else
331 first = false;
332 ::add_info_string(current, str);
333 }
334 if (path->rowid_intersection().cpk_child) {
335 str->append(',');
336 ::add_info_string(path->rowid_intersection().cpk_child, str);
337 }
338 str->append(')');
339 break;
340 }
342 bool first = true;
343 str->append(STRING_WITH_LEN("union("));
344 for (AccessPath *current : *path->rowid_union().children) {
345 if (!first)
346 str->append(',');
347 else
348 first = false;
349 ::add_info_string(current, str);
350 }
351 str->append(')');
352 break;
353 }
355 str->append(STRING_WITH_LEN("index_for_skip_scan("));
356 str->append(path->index_skip_scan().param->index_info->name);
357 str->append(')');
358 break;
359 }
361 str->append(STRING_WITH_LEN("index_for_group_by("));
362 str->append(path->group_index_skip_scan().param->index_info->name);
363 str->append(')');
364 break;
365 }
366 default:
367 assert(false);
368 }
369}
370
371/*
372 Append comma-separated list of keys this quick select uses to key_names;
373 append comma-separated list of corresponding used lengths to used_lengths.
374 This is used by select_describe.
375
376 path must be a range scan, or there will be an assert.
377 */
378inline void add_keys_and_lengths(const AccessPath *path, String *key_names,
379 String *used_lengths) {
380 switch (path->type) {
382 TABLE *table = path->index_range_scan().used_key_part[0].field->table;
383 KEY *key_info = table->key_info + path->index_range_scan().index;
384 key_names->append(key_info->name);
385
386 char buf[64];
387 size_t length =
389 used_lengths->append(buf, length);
390 break;
391 }
393 add_keys_and_lengths_index_merge(path, key_names, used_lengths);
394 break;
396 add_keys_and_lengths_rowid_intersection(path, key_names, used_lengths);
397 break;
399 add_keys_and_lengths_rowid_union(path, key_names, used_lengths);
400 break;
402 key_names->append(path->index_skip_scan().param->index_info->name);
403
404 char buf[64];
405 uint length =
407 used_lengths->append(buf, length);
408
409 break;
410 }
412 key_names->append(path->group_index_skip_scan().param->index_info->name);
413
414 char buf[64];
415 uint length =
417 used_lengths->append(buf, length);
418
419 break;
420 }
421 default:
422 assert(false);
423 }
424}
425
426/**
427 Add basic info for this range scan to the optimizer trace.
428
429 path must be a range scan, or there will be an assert.
430
431 @param thd Thread handle
432 @param param Parameters for range analysis of this table
433 @param trace_object The optimizer trace object the info is appended to
434 */
435inline void trace_basic_info(THD *thd, const AccessPath *path,
436 const RANGE_OPT_PARAM *param,
437 Opt_trace_object *trace_object) {
438 switch (path->type) {
440 trace_basic_info_index_range_scan(thd, path, param, trace_object);
441 break;
443 trace_basic_info_index_merge(thd, path, param, trace_object);
444 break;
446 trace_basic_info_rowid_intersection(thd, path, param, trace_object);
447 break;
449 trace_basic_info_rowid_union(thd, path, param, trace_object);
450 break;
452 trace_basic_info_index_skip_scan(thd, path, param, trace_object);
453 break;
455 trace_basic_info_group_index_skip_scan(thd, path, param, trace_object);
456 break;
457 default:
458 assert(false);
459 }
460}
461
462inline bool get_forced_by_hint(const AccessPath *path) {
463 switch (path->type) {
465 return false; // There is no hint for plain range scan.
467 return path->index_merge().forced_by_hint;
469 return path->rowid_intersection().forced_by_hint;
471 return path->rowid_union().forced_by_hint;
473 return path->index_skip_scan().forced_by_hint;
475 return path->group_index_skip_scan().forced_by_hint;
476 default:
477 assert(false);
478 return false;
479 }
480}
481
482#ifndef NDEBUG
483/*
484 Print quick select information to DBUG_FILE. Caller is responsible
485 for locking DBUG_FILE before this call and unlocking it afterwards.
486 */
487inline void dbug_dump(const AccessPath *path, int indent, bool verbose) {
488 switch (path->type) {
490 const auto &param = path->index_range_scan();
491 dbug_dump_range(indent, verbose, param.used_key_part[0].field->table,
492 param.index, param.used_key_part,
493 {param.ranges, param.num_ranges});
494 break;
495 }
497 dbug_dump_index_merge(indent, verbose, *path->index_merge().children);
498 break;
499 }
502 *path->rowid_intersection().children);
503 break;
505 dbug_dump_rowid_union(indent, verbose, *path->rowid_union().children);
506 break;
509 break;
512 break;
513 default:
514 assert(false);
515 }
516}
517#endif
518
#endif  // SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_
A wrapper class which provides array bounds checking.
Definition: sql_array.h:46
Definition: key.h:56
Definition: key.h:112
const char * name
Name of key.
Definition: key.h:152
A JSON object (unordered set of key/value pairs).
Definition: opt_trace.h:801
Definition: range_optimizer.h:68
uint16 flag
Stores bitwise-or'ed bits defined in enum key_range_flags.
Definition: range_optimizer.h:74
uint16 min_length
Definition: range_optimizer.h:71
Definition: range_opt_param.h:28
Using this class is fraught with peril, and you need to be very careful when doing so.
Definition: sql_string.h:166
bool append(const String &s)
Definition: sql_string.cc:418
For each client connection we create a separate thread with THD serving as a thread/connection descri...
Definition: sql_lexer_thd.h:35
static char buf[MAX_BUF]
Definition: conf_to_src.cc:72
const char * p
Definition: ctype-mb.cc:1234
void trace_basic_info_group_index_skip_scan(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *, Opt_trace_object *trace_object)
Definition: group_index_skip_scan_plan.cc:78
void dbug_dump_group_index_skip_scan(int indent, bool, const AccessPath *path)
Definition: group_index_skip_scan_plan.cc:1836
void trace_basic_info_index_merge(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Definition: index_merge_plan.cc:33
void add_keys_and_lengths_index_merge(const AccessPath *path, String *key_names, String *used_lengths)
Definition: index_merge_plan.cc:45
void dbug_dump_index_merge(int indent, bool verbose, const Mem_root_array< AccessPath * > &children)
Definition: index_merge_plan.cc:70
void trace_basic_info_index_range_scan(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Definition: index_range_scan_plan.cc:793
void dbug_dump_range(int indent, bool verbose, TABLE *table, int index, KEY_PART *used_key_part, Bounds_checked_array< QUICK_RANGE * > ranges)
Definition: index_range_scan_plan.cc:1311
void dbug_dump_index_skip_scan(int indent, bool verbose, const AccessPath *path)
Definition: index_skip_scan_plan.cc:803
void trace_basic_info_index_skip_scan(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *, Opt_trace_object *trace_object)
Definition: index_skip_scan_plan.cc:70
MYSQL_STRINGS_EXPORT char * longlong10_to_str(int64_t val, char *dst, int radix)
Converts a 64-bit integer to its string representation in decimal notation.
Definition: int2str.cc:97
bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields)
Definition: key.cc:410
@ EQ_RANGE
Definition: my_base.h:1096
@ NULL_RANGE
Definition: my_base.h:1101
#define HA_NOSAME
Do not allow duplicate records.
Definition: my_base.h:474
static void bitmap_set_bit(MY_BITMAP *map, uint bit)
Definition: my_bitmap.h:79
static uint verbose
Definition: mysqlcheck.cc:65
static char * path
Definition: mysqldump.cc:148
std::string str(const mysqlrouter::ConfigGenerator::Options::Endpoint &ep)
Definition: config_generator.cc:1065
static PFS_engine_table_share_proxy table
Definition: pfs.cc:60
Definition: buf0block_hint.cc:29
bool length(const dd::Spatial_reference_system *srs, const Geometry *g1, double *length, bool *null) noexcept
Computes the length of linestrings and multilinestrings.
Definition: length.cc:75
void add_info_string(const AccessPath *path, String *str)
Definition: path_helpers.h:294
bool unique_key_range(const AccessPath *path)
Return true if there is only one range and this uses the whole unique key.
Definition: path_helpers.h:129
bool is_loose_index_scan(const AccessPath *path)
Definition: path_helpers.h:40
void add_keys_and_lengths(const AccessPath *path, String *key_names, String *used_lengths)
Definition: path_helpers.h:378
unsigned used_index(const AccessPath *path)
If this is an index range scan, and that range scan uses a single index, returns the index used.
Definition: path_helpers.h:113
bool get_forced_by_hint(const AccessPath *path)
Definition: path_helpers.h:462
unsigned get_max_used_key_length(const AccessPath *path)
Get the total length of first used_key_parts parts of the key, in bytes.
Definition: path_helpers.h:259
bool uses_index_on_fields(const AccessPath *path, const MY_BITMAP *fields)
Return whether any index used by this range scan uses the field(s) marked in passed bitmap.
Definition: path_helpers.h:214
void trace_basic_info(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Add basic info for this range scan to the optimizer trace.
Definition: path_helpers.h:435
bool is_agg_loose_index_scan(const AccessPath *path)
Definition: path_helpers.h:45
void set_need_sorted_output(AccessPath *path)
Definition: path_helpers.h:95
unsigned get_used_key_parts(const AccessPath *path)
Definition: path_helpers.h:192
bool make_reverse(uint used_key_parts, AccessPath *path)
Ask the AccessPath to reverse itself; returns false if successful.
Definition: path_helpers.h:79
bool is_reverse_sorted_range(const AccessPath *path)
Whether the access path is an INDEX_RANGE_SCAN that returns rows in reverse order.
Definition: path_helpers.h:70
void get_fields_used(const AccessPath *path, MY_BITMAP *used_fields)
Definition: path_helpers.h:145
void dbug_dump(const AccessPath *path, int indent, bool verbose)
Definition: path_helpers.h:487
bool reverse_sort_possible(const AccessPath *path)
Whether the range access method is capable of returning records in reverse order.
Definition: path_helpers.h:62
required string key
Definition: replication_asynchronous_connection_failover.proto:59
void add_keys_and_lengths_rowid_union(const AccessPath *path, String *key_names, String *used_lengths)
Definition: rowid_ordered_retrieval_plan.cc:946
void add_keys_and_lengths_rowid_intersection(const AccessPath *path, String *key_names, String *used_lengths)
Definition: rowid_ordered_retrieval_plan.cc:911
void dbug_dump_rowid_union(int indent, bool verbose, const Mem_root_array< AccessPath * > &children)
Definition: rowid_ordered_retrieval_plan.cc:971
void trace_basic_info_rowid_intersection(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Definition: rowid_ordered_retrieval_plan.cc:111
void trace_basic_info_rowid_union(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Definition: rowid_ordered_retrieval_plan.cc:129
void dbug_dump_rowid_intersection(int indent, bool verbose, const Mem_root_array< AccessPath * > &children)
Definition: rowid_ordered_retrieval_plan.cc:961
#define HA_MRR_SORTED
Definition: handler.h:3977
constexpr const unsigned int MAX_KEY
Definition: sql_const.h:45
#define STRING_WITH_LEN(X)
Definition: string_with_len.h:28
Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path ...
Definition: access_path.h:212
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:248
@ INDEX_RANGE_SCAN
Definition: access_path.h:243
@ ROWID_UNION
Definition: access_path.h:246
@ INDEX_SKIP_SCAN
Definition: access_path.h:247
@ INDEX_MERGE
Definition: access_path.h:244
@ ROWID_INTERSECTION
Definition: access_path.h:245
Definition: my_bitmap.h:42
Definition: table.h:1403
Definition: gen_lex_token.cc:148