PostgreSQL Source Code git master
pathnode.h File Reference
#include "nodes/bitmapset.h"
#include "nodes/pathnodes.h"
Include dependency graph for pathnode.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Functions

int compare_path_costs (Path *path1, Path *path2, CostSelector criterion)
 
int compare_fractional_path_costs (Path *path1, Path *path2, double fraction)
 
void set_cheapest (RelOptInfo *parent_rel)
 
void add_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_path_precheck (RelOptInfo *parent_rel, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
 
void add_partial_path (RelOptInfo *parent_rel, Path *new_path)
 
bool add_partial_path_precheck (RelOptInfo *parent_rel, int disabled_nodes, Cost total_cost, List *pathkeys)
 
Path * create_seqscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
 
Path * create_samplescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
IndexPath * create_index_path (PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
 
BitmapHeapPath * create_bitmap_heap_path (PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
 
BitmapAndPath * create_bitmap_and_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)
 
BitmapOrPath * create_bitmap_or_path (PlannerInfo *root, RelOptInfo *rel, List *bitmapquals)
 
TidPath * create_tidscan_path (PlannerInfo *root, RelOptInfo *rel, List *tidquals, Relids required_outer)
 
TidRangePath * create_tidrangescan_path (PlannerInfo *root, RelOptInfo *rel, List *tidrangequals, Relids required_outer)
 
AppendPath * create_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
 
MergeAppendPath * create_merge_append_path (PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *pathkeys, Relids required_outer)
 
GroupResultPath * create_group_result_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
 
MaterialPath * create_material_path (RelOptInfo *rel, Path *subpath)
 
MemoizePath * create_memoize_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, Cardinality est_calls)
 
GatherPath * create_gather_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, Relids required_outer, double *rows)
 
GatherMergePath * create_gather_merge_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
 
SubqueryScanPath * create_subqueryscan_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
 
Path * create_functionscan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
 
Path * create_valuesscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_tablefuncscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_ctescan_path (PlannerInfo *root, RelOptInfo *rel, List *pathkeys, Relids required_outer)
 
Path * create_namedtuplestorescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_resultscan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
Path * create_worktablescan_path (PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 
ForeignPath * create_foreignscan_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
ForeignPath * create_foreign_join_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
ForeignPath * create_foreign_upper_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, double rows, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Path *fdw_outerpath, List *fdw_restrictinfo, List *fdw_private)
 
Relids calc_nestloop_required_outer (Relids outerrelids, Relids outer_paramrels, Relids innerrelids, Relids inner_paramrels)
 
Relids calc_non_nestloop_required_outer (Path *outer_path, Path *inner_path)
 
NestPath * create_nestloop_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer)
 
MergePath * create_mergejoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, List *restrict_clauses, List *pathkeys, Relids required_outer, List *mergeclauses, List *outersortkeys, List *innersortkeys, int outer_presorted_keys)
 
HashPath * create_hashjoin_path (PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, JoinCostWorkspace *workspace, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, bool parallel_hash, List *restrict_clauses, Relids required_outer, List *hashclauses)
 
ProjectionPath * create_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
 
Path * apply_projection_to_path (PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
 
ProjectSetPath * create_set_projection_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
 
SortPath * create_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
 
IncrementalSortPath * create_incremental_sort_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
 
GroupPath * create_group_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
 
UniquePath * create_unique_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
 
AggPath * create_agg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
 
GroupingSetsPath * create_groupingsets_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
 
MinMaxAggPath * create_minmaxagg_path (PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *mmaggregates, List *quals)
 
WindowAggPath * create_windowagg_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
 
SetOpPath * create_setop_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, SetOpCmd cmd, SetOpStrategy strategy, List *groupList, double numGroups, double outputRows)
 
RecursiveUnionPath * create_recursiveunion_path (PlannerInfo *root, RelOptInfo *rel, Path *leftpath, Path *rightpath, PathTarget *target, List *distinctList, int wtParam, double numGroups)
 
LockRowsPath * create_lockrows_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
 
ModifyTablePath * create_modifytable_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
 
LimitPath * create_limit_path (PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
 
void adjust_limit_rows_costs (double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
 
Path * reparameterize_path (PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
 
Path * reparameterize_path_by_child (PlannerInfo *root, Path *path, RelOptInfo *child_rel)
 
bool path_is_reparameterizable_by_child (Path *path, RelOptInfo *child_rel)
 
void setup_simple_rel_arrays (PlannerInfo *root)
 
void expand_planner_arrays (PlannerInfo *root, int add_size)
 
RelOptInfo * build_simple_rel (PlannerInfo *root, int relid, RelOptInfo *parent)
 
RelOptInfo * build_simple_grouped_rel (PlannerInfo *root, RelOptInfo *rel)
 
RelOptInfo * build_grouped_rel (PlannerInfo *root, RelOptInfo *rel)
 
RelOptInfo * find_base_rel (PlannerInfo *root, int relid)
 
RelOptInfo * find_base_rel_noerr (PlannerInfo *root, int relid)
 
RelOptInfo * find_base_rel_ignore_join (PlannerInfo *root, int relid)
 
RelOptInfo * find_join_rel (PlannerInfo *root, Relids relids)
 
RelOptInfo * build_join_rel (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, List **restrictlist_ptr)
 
Relids min_join_parameterization (PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
 
RelOptInfo * fetch_upper_rel (PlannerInfo *root, UpperRelationKind kind, Relids relids)
 
Relids find_childrel_parents (PlannerInfo *root, RelOptInfo *rel)
 
ParamPathInfo * get_baserel_parampathinfo (PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
 
ParamPathInfo * get_joinrel_parampathinfo (PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)
 
ParamPathInfo * get_appendrel_parampathinfo (RelOptInfo *appendrel, Relids required_outer)
 
ParamPathInfo * find_param_path_info (RelOptInfo *rel, Relids required_outer)
 
Bitmapset * get_param_path_clause_serials (Path *path)
 
RelOptInfo * build_child_join_rel (PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel, RelOptInfo *parent_joinrel, List *restrictlist, SpecialJoinInfo *sjinfo, int nappinfos, AppendRelInfo **appinfos)
 
RelAggInfo * create_rel_agg_info (PlannerInfo *root, RelOptInfo *rel, bool calculate_grouped_rows)
 

Function Documentation

◆ add_partial_path()

void add_partial_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 795 of file pathnode.c.

796{
797 bool accept_new = true; /* unless we find a superior old path */
798 int insert_at = 0; /* where to insert new item */
799 ListCell *p1;
800
801 /* Check for query cancel. */
802 CHECK_FOR_INTERRUPTS();
803
804 /* Path to be added must be parallel safe. */
805 Assert(new_path->parallel_safe);
806
807 /* Relation should be OK for parallelism, too. */
808 Assert(parent_rel->consider_parallel);
809
810 /*
811 * As in add_path, throw out any paths which are dominated by the new
812 * path, but throw out the new path if some existing path dominates it.
813 */
814 foreach(p1, parent_rel->partial_pathlist)
815 {
816 Path *old_path = (Path *) lfirst(p1);
817 bool remove_old = false; /* unless new proves superior */
818 PathKeysComparison keyscmp;
819
820 /* Compare pathkeys. */
821 keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
822
823 /* Unless pathkeys are incompatible, keep just one of the two paths. */
824 if (keyscmp != PATHKEYS_DIFFERENT)
825 {
826 if (unlikely(new_path->disabled_nodes != old_path->disabled_nodes))
827 {
828 if (new_path->disabled_nodes > old_path->disabled_nodes)
829 accept_new = false;
830 else
831 remove_old = true;
832 }
833 else if (new_path->total_cost > old_path->total_cost
834 * STD_FUZZ_FACTOR)
835 {
836 /* New path costs more; keep it only if pathkeys are better. */
837 if (keyscmp != PATHKEYS_BETTER1)
838 accept_new = false;
839 }
840 else if (old_path->total_cost > new_path->total_cost
841 * STD_FUZZ_FACTOR)
842 {
843 /* Old path costs more; keep it only if pathkeys are better. */
844 if (keyscmp != PATHKEYS_BETTER2)
845 remove_old = true;
846 }
847 else if (keyscmp == PATHKEYS_BETTER1)
848 {
849 /* Costs are about the same, new path has better pathkeys. */
850 remove_old = true;
851 }
852 else if (keyscmp == PATHKEYS_BETTER2)
853 {
854 /* Costs are about the same, old path has better pathkeys. */
855 accept_new = false;
856 }
857 else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
858 {
859 /* Pathkeys are the same, and the old path costs more. */
860 remove_old = true;
861 }
862 else
863 {
864 /*
865 * Pathkeys are the same, and new path isn't materially
866 * cheaper.
867 */
868 accept_new = false;
869 }
870 }
871
872 /*
873 * Remove current element from partial_pathlist if dominated by new.
874 */
875 if (remove_old)
876 {
877 parent_rel->partial_pathlist =
878 foreach_delete_current(parent_rel->partial_pathlist, p1);
879 pfree(old_path);
880 }
881 else
882 {
883 /* new belongs after this old path if it has cost >= old's */
884 if (new_path->total_cost >= old_path->total_cost)
885 insert_at = foreach_current_index(p1) + 1;
886 }
887
888 /*
889 * If we found an old path that dominates new_path, we can quit
890 * scanning the partial_pathlist; we will not add new_path, and we
891 * assume new_path cannot dominate any later path.
892 */
893 if (!accept_new)
894 break;
895 }
896
897 if (accept_new)
898 {
899 /* Accept the new path: insert it at proper place */
900 parent_rel->partial_pathlist =
901 list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
902 }
903 else
904 {
905 /* Reject and recycle the new path */
906 pfree(new_path);
907 }
908}
#define unlikely(x)
Definition: c.h:407
Assert(PointerIsAligned(start, uint64))
List * list_insert_nth(List *list, int pos, void *datum)
Definition: list.c:439
void pfree(void *pointer)
Definition: mcxt.c:1594
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
#define STD_FUZZ_FACTOR
Definition: pathnode.c:49
PathKeysComparison
Definition: paths.h:211
@ PATHKEYS_BETTER2
Definition: paths.h:214
@ PATHKEYS_BETTER1
Definition: paths.h:213
@ PATHKEYS_DIFFERENT
Definition: paths.h:215
#define lfirst(lc)
Definition: pg_list.h:172
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define foreach_delete_current(lst, var_or_cell)
Definition: pg_list.h:391
List * pathkeys
Definition: pathnodes.h:1913
int disabled_nodes
Definition: pathnodes.h:1908
Cost total_cost
Definition: pathnodes.h:1910
bool parallel_safe
Definition: pathnodes.h:1902
bool consider_parallel
Definition: pathnodes.h:943
List * partial_pathlist
Definition: pathnodes.h:956

References Assert(), CHECK_FOR_INTERRUPTS, compare_pathkeys(), RelOptInfo::consider_parallel, Path::disabled_nodes, foreach_current_index, foreach_delete_current, lfirst, list_insert_nth(), Path::parallel_safe, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, pfree(), STD_FUZZ_FACTOR, Path::total_cost, and unlikely.

Referenced by add_paths_to_append_rel(), build_index_paths(), build_setop_child_paths(), create_partial_bitmap_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_partial_unique_paths(), create_plain_partial_paths(), generate_grouped_paths(), grouping_planner(), set_subquery_pathlist(), try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
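As a usage illustration (a hedged sketch, not code from pathnode.c): a scan provider typically builds one partial path per strategy and hands it to add_partial_path(), which then applies exactly the dominance rules shown above and takes ownership of the path. The helper name and the fixed worker count below are assumptions for illustration only.

    #include "postgres.h"
    #include "optimizer/pathnode.h"

    /* Hypothetical helper: offer one partial sequential scan for 'rel'. */
    static void
    my_add_partial_seqscan(PlannerInfo *root, RelOptInfo *rel)
    {
        int         parallel_workers = 2;   /* assumed; normally derived from rel size */

        /* add_partial_path() Asserts that both the path and the rel are parallel-safe */
        if (!rel->consider_parallel)
            return;

        add_partial_path(rel, create_seqscan_path(root, rel, NULL,
                                                  parallel_workers));
    }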

◆ add_partial_path_precheck()

bool add_partial_path_precheck ( RelOptInfo * parent_rel,
int disabled_nodes,
Cost total_cost,
List * pathkeys 
)

Definition at line 921 of file pathnode.c.

923{
924 ListCell *p1;
925
926 /*
927 * Our goal here is twofold. First, we want to find out whether this path
928 * is clearly inferior to some existing partial path. If so, we want to
929 * reject it immediately. Second, we want to find out whether this path
930 * is clearly superior to some existing partial path -- at least, modulo
931 * final cost computations. If so, we definitely want to consider it.
932 *
933 * Unlike add_path(), we always compare pathkeys here. This is because we
934 * expect partial_pathlist to be very short, and getting a definitive
935 * answer at this stage avoids the need to call add_path_precheck.
936 */
937 foreach(p1, parent_rel->partial_pathlist)
938 {
939 Path *old_path = (Path *) lfirst(p1);
940 PathKeysComparison keyscmp;
941
942 keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
943 if (keyscmp != PATHKEYS_DIFFERENT)
944 {
945 if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
946 keyscmp != PATHKEYS_BETTER1)
947 return false;
948 if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
949 keyscmp != PATHKEYS_BETTER2)
950 return true;
951 }
952 }
953
954 /*
955 * This path is neither clearly inferior to an existing partial path nor
956 * clearly good enough that it might replace one. Compare it to
957 * non-parallel plans. If it loses even before accounting for the cost of
958 * the Gather node, we should definitely reject it.
959 *
960 * Note that we pass the total_cost to add_path_precheck twice. This is
961 * because it's never advantageous to consider the startup cost of a
962 * partial path; the resulting plans, if run in parallel, will be run to
963 * completion.
964 */
965 if (!add_path_precheck(parent_rel, disabled_nodes, total_cost, total_cost,
966 pathkeys, NULL))
967 return false;
968
969 return true;
970}
bool add_path_precheck(RelOptInfo *parent_rel, int disabled_nodes, Cost startup_cost, Cost total_cost, List *pathkeys, Relids required_outer)
Definition: pathnode.c:688

References add_path_precheck(), compare_pathkeys(), lfirst, RelOptInfo::partial_pathlist, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by try_partial_hashjoin_path(), try_partial_mergejoin_path(), and try_partial_nestloop_path().
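Its callers run this precheck before building the comparatively expensive join path node. A hedged sketch of that pattern follows; the wrapper name is hypothetical, and initial_cost_nestloop() plus the JoinCostWorkspace fields are assumed from the planner's costing module (optimizer/cost.h) rather than from this header.

    #include "postgres.h"
    #include "optimizer/cost.h"
    #include "optimizer/pathnode.h"

    /* Hypothetical wrapper mirroring the try_partial_nestloop_path() pattern. */
    static void
    my_try_partial_nestloop(PlannerInfo *root, RelOptInfo *joinrel,
                            JoinType jointype, JoinPathExtraData *extra,
                            Path *outer_path, Path *inner_path,
                            List *restrict_clauses, List *pathkeys)
    {
        JoinCostWorkspace workspace;

        /* Cheap lower-bound costing before constructing the real path node */
        initial_cost_nestloop(root, &workspace, jointype,
                              outer_path, inner_path, extra);

        /* Bail out if this lower bound already loses to existing paths */
        if (!add_partial_path_precheck(joinrel, workspace.disabled_nodes,
                                       workspace.total_cost, pathkeys))
            return;

        add_partial_path(joinrel, (Path *)
                         create_nestloop_path(root, joinrel, jointype,
                                              &workspace, extra,
                                              outer_path, inner_path,
                                              restrict_clauses, pathkeys,
                                              NULL));
    }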

◆ add_path()

void add_path ( RelOptInfo * parent_rel,
Path * new_path 
)

Definition at line 461 of file pathnode.c.

462{
463 bool accept_new = true; /* unless we find a superior old path */
464 int insert_at = 0; /* where to insert new item */
465 List *new_path_pathkeys;
466 ListCell *p1;
467
468 /*
469 * This is a convenient place to check for query cancel --- no part of the
470 * planner goes very long without calling add_path().
471 */
472 CHECK_FOR_INTERRUPTS();
473
474 /* Pretend parameterized paths have no pathkeys, per comment above */
475 new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
476
477 /*
478 * Loop to check proposed new path against old paths. Note it is possible
479 * for more than one old path to be tossed out because new_path dominates
480 * it.
481 */
482 foreach(p1, parent_rel->pathlist)
483 {
484 Path *old_path = (Path *) lfirst(p1);
485 bool remove_old = false; /* unless new proves superior */
486 PathCostComparison costcmp;
487 PathKeysComparison keyscmp;
488 BMS_Comparison outercmp;
489
490 /*
491 * Do a fuzzy cost comparison with standard fuzziness limit.
492 */
493 costcmp = compare_path_costs_fuzzily(new_path, old_path,
494 STD_FUZZ_FACTOR);
495
496 /*
497 * If the two paths compare differently for startup and total cost,
498 * then we want to keep both, and we can skip comparing pathkeys and
499 * required_outer rels. If they compare the same, proceed with the
500 * other comparisons. Row count is checked last. (We make the tests
501 * in this order because the cost comparison is most likely to turn
502 * out "different", and the pathkeys comparison next most likely. As
503 * explained above, row count very seldom makes a difference, so even
504 * though it's cheap to compare there's not much point in checking it
505 * earlier.)
506 */
507 if (costcmp != COSTS_DIFFERENT)
508 {
509 /* Similarly check to see if either dominates on pathkeys */
510 List *old_path_pathkeys;
511
512 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
513 keyscmp = compare_pathkeys(new_path_pathkeys,
514 old_path_pathkeys);
515 if (keyscmp != PATHKEYS_DIFFERENT)
516 {
517 switch (costcmp)
518 {
519 case COSTS_EQUAL:
520 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
521 PATH_REQ_OUTER(old_path));
522 if (keyscmp == PATHKEYS_BETTER1)
523 {
524 if ((outercmp == BMS_EQUAL ||
525 outercmp == BMS_SUBSET1) &&
526 new_path->rows <= old_path->rows &&
527 new_path->parallel_safe >= old_path->parallel_safe)
528 remove_old = true; /* new dominates old */
529 }
530 else if (keyscmp == PATHKEYS_BETTER2)
531 {
532 if ((outercmp == BMS_EQUAL ||
533 outercmp == BMS_SUBSET2) &&
534 new_path->rows >= old_path->rows &&
535 new_path->parallel_safe <= old_path->parallel_safe)
536 accept_new = false; /* old dominates new */
537 }
538 else /* keyscmp == PATHKEYS_EQUAL */
539 {
540 if (outercmp == BMS_EQUAL)
541 {
542 /*
543 * Same pathkeys and outer rels, and fuzzily
544 * the same cost, so keep just one; to decide
545 * which, first check parallel-safety, then
546 * rows, then do a fuzzy cost comparison with
547 * very small fuzz limit. (We used to do an
548 * exact cost comparison, but that results in
549 * annoying platform-specific plan variations
550 * due to roundoff in the cost estimates.) If
551 * things are still tied, arbitrarily keep
552 * only the old path. Notice that we will
553 * keep only the old path even if the
554 * less-fuzzy comparison decides the startup
555 * and total costs compare differently.
556 */
557 if (new_path->parallel_safe >
558 old_path->parallel_safe)
559 remove_old = true; /* new dominates old */
560 else if (new_path->parallel_safe <
561 old_path->parallel_safe)
562 accept_new = false; /* old dominates new */
563 else if (new_path->rows < old_path->rows)
564 remove_old = true; /* new dominates old */
565 else if (new_path->rows > old_path->rows)
566 accept_new = false; /* old dominates new */
567 else if (compare_path_costs_fuzzily(new_path,
568 old_path,
569 1.0000000001) == COSTS_BETTER1)
570 remove_old = true; /* new dominates old */
571 else
572 accept_new = false; /* old equals or
573 * dominates new */
574 }
575 else if (outercmp == BMS_SUBSET1 &&
576 new_path->rows <= old_path->rows &&
577 new_path->parallel_safe >= old_path->parallel_safe)
578 remove_old = true; /* new dominates old */
579 else if (outercmp == BMS_SUBSET2 &&
580 new_path->rows >= old_path->rows &&
581 new_path->parallel_safe <= old_path->parallel_safe)
582 accept_new = false; /* old dominates new */
583 /* else different parameterizations, keep both */
584 }
585 break;
586 case COSTS_BETTER1:
587 if (keyscmp != PATHKEYS_BETTER2)
588 {
589 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
590 PATH_REQ_OUTER(old_path));
591 if ((outercmp == BMS_EQUAL ||
592 outercmp == BMS_SUBSET1) &&
593 new_path->rows <= old_path->rows &&
594 new_path->parallel_safe >= old_path->parallel_safe)
595 remove_old = true; /* new dominates old */
596 }
597 break;
598 case COSTS_BETTER2:
599 if (keyscmp != PATHKEYS_BETTER1)
600 {
601 outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
602 PATH_REQ_OUTER(old_path));
603 if ((outercmp == BMS_EQUAL ||
604 outercmp == BMS_SUBSET2) &&
605 new_path->rows >= old_path->rows &&
606 new_path->parallel_safe <= old_path->parallel_safe)
607 accept_new = false; /* old dominates new */
608 }
609 break;
610 case COSTS_DIFFERENT:
611
612 /*
613 * can't get here, but keep this case to keep compiler
614 * quiet
615 */
616 break;
617 }
618 }
619 }
620
621 /*
622 * Remove current element from pathlist if dominated by new.
623 */
624 if (remove_old)
625 {
626 parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
627 p1);
628
629 /*
630 * Delete the data pointed-to by the deleted cell, if possible
631 */
632 if (!IsA(old_path, IndexPath))
633 pfree(old_path);
634 }
635 else
636 {
637 /*
638 * new belongs after this old path if it has more disabled nodes
639 * or if it has the same number of nodes but a greater total cost
640 */
641 if (new_path->disabled_nodes > old_path->disabled_nodes ||
642 (new_path->disabled_nodes == old_path->disabled_nodes &&
643 new_path->total_cost >= old_path->total_cost))
644 insert_at = foreach_current_index(p1) + 1;
645 }
646
647 /*
648 * If we found an old path that dominates new_path, we can quit
649 * scanning the pathlist; we will not add new_path, and we assume
650 * new_path cannot dominate any other elements of the pathlist.
651 */
652 if (!accept_new)
653 break;
654 }
655
656 if (accept_new)
657 {
658 /* Accept the new path: insert it at proper place in pathlist */
659 parent_rel->pathlist =
660 list_insert_nth(parent_rel->pathlist, insert_at, new_path);
661 }
662 else
663 {
664 /* Reject and recycle the new path */
665 if (!IsA(new_path, IndexPath))
666 pfree(new_path);
667 }
668}
BMS_Comparison bms_subset_compare(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:445
BMS_Comparison
Definition: bitmapset.h:61
@ BMS_SUBSET1
Definition: bitmapset.h:63
@ BMS_EQUAL
Definition: bitmapset.h:62
@ BMS_SUBSET2
Definition: bitmapset.h:64
#define IsA(nodeptr, _type_)
Definition: nodes.h:164
static PathCostComparison compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
Definition: pathnode.c:183
PathCostComparison
Definition: pathnode.c:37
@ COSTS_EQUAL
Definition: pathnode.c:38
@ COSTS_BETTER1
Definition: pathnode.c:39
@ COSTS_BETTER2
Definition: pathnode.c:40
@ COSTS_DIFFERENT
Definition: pathnode.c:41
#define PATH_REQ_OUTER(path)
Definition: pathnodes.h:1917
#define NIL
Definition: pg_list.h:68
Definition: pg_list.h:54
Cardinality rows
Definition: pathnodes.h:1907
List * pathlist
Definition: pathnodes.h:954

References BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), CHECK_FOR_INTERRUPTS, compare_path_costs_fuzzily(), compare_pathkeys(), COSTS_BETTER1, COSTS_BETTER2, COSTS_DIFFERENT, COSTS_EQUAL, Path::disabled_nodes, foreach_current_index, foreach_delete_current, IsA, lfirst, list_insert_nth(), NIL, Path::parallel_safe, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER1, PATHKEYS_BETTER2, PATHKEYS_DIFFERENT, RelOptInfo::pathlist, pfree(), Path::rows, STD_FUZZ_FACTOR, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), add_foreign_ordered_paths(), add_paths_to_append_rel(), add_paths_to_grouping_rel(), add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), BuildParameterizedTidPaths(), consider_groupingsets_paths(), create_degenerate_grouping_paths(), create_final_distinct_paths(), create_final_unique_paths(), create_index_paths(), create_one_window_path(), create_ordered_paths(), create_partial_grouping_paths(), create_tidscan_paths(), fileGetForeignPaths(), gather_grouping_paths(), generate_gather_paths(), generate_grouped_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_recursion_path(), generate_union_paths(), generate_useful_gather_paths(), get_index_paths(), grouping_planner(), mark_dummy_rel(), postgresGetForeignJoinPaths(), postgresGetForeignPaths(), preprocess_minmax_aggregates(), query_planner(), set_cte_pathlist(), set_dummy_rel_pathlist(), set_function_pathlist(), set_namedtuplestore_pathlist(), set_plain_rel_pathlist(), set_result_pathlist(), set_subquery_pathlist(), set_tablefunc_pathlist(), set_tablesample_rel_pathlist(), set_values_pathlist(), set_worktable_pathlist(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
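In typical use a path is built with one of the create_*_path() constructors declared in this header and handed straight to add_path(), which takes ownership and may pfree() it. A minimal, hedged sketch (the function name is hypothetical; it mirrors what set_plain_rel_pathlist() does for a plain table):

    #include "postgres.h"
    #include "optimizer/pathnode.h"

    /* Hypothetical: give a plain base relation its unparameterized seqscan path. */
    static void
    my_set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel)
    {
        /* add_path() decides whether the new path survives; do not reuse it afterwards */
        add_path(rel, create_seqscan_path(root, rel, NULL, 0));
    }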

◆ add_path_precheck()

bool add_path_precheck ( RelOptInfo * parent_rel,
int disabled_nodes,
Cost startup_cost,
Cost total_cost,
List * pathkeys,
Relids required_outer 
)

Definition at line 688 of file pathnode.c.

691{
692 List *new_path_pathkeys;
693 bool consider_startup;
694 ListCell *p1;
695
696 /* Pretend parameterized paths have no pathkeys, per add_path policy */
697 new_path_pathkeys = required_outer ? NIL : pathkeys;
698
699 /* Decide whether new path's startup cost is interesting */
700 consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
701
702 foreach(p1, parent_rel->pathlist)
703 {
704 Path *old_path = (Path *) lfirst(p1);
705 PathKeysComparison keyscmp;
706
707 /*
708 * Since the pathlist is sorted by disabled_nodes and then by
709 * total_cost, we can stop looking once we reach a path with more
710 * disabled nodes, or the same number of disabled nodes plus a
711 * total_cost larger than the new path's.
712 */
713 if (unlikely(old_path->disabled_nodes != disabled_nodes))
714 {
715 if (disabled_nodes < old_path->disabled_nodes)
716 break;
717 }
718 else if (total_cost <= old_path->total_cost * STD_FUZZ_FACTOR)
719 break;
720
721 /*
722 * We are looking for an old_path with the same parameterization (and
723 * by assumption the same rowcount) that dominates the new path on
724 * pathkeys as well as both cost metrics. If we find one, we can
725 * reject the new path.
726 *
727 * Cost comparisons here should match compare_path_costs_fuzzily.
728 */
729 /* new path can win on startup cost only if consider_startup */
730 if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
731 !consider_startup)
732 {
733 /* new path loses on cost, so check pathkeys... */
734 List *old_path_pathkeys;
735
736 old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
737 keyscmp = compare_pathkeys(new_path_pathkeys,
738 old_path_pathkeys);
739 if (keyscmp == PATHKEYS_EQUAL ||
740 keyscmp == PATHKEYS_BETTER2)
741 {
742 /* new path does not win on pathkeys... */
743 if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
744 {
745 /* Found an old path that dominates the new one */
746 return false;
747 }
748 }
749 }
750 }
751
752 return true;
753}
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
@ PATHKEYS_EQUAL
Definition: paths.h:212
Cost startup_cost
Definition: pathnodes.h:1909
bool consider_param_startup
Definition: pathnodes.h:941
bool consider_startup
Definition: pathnodes.h:939

References bms_equal(), compare_pathkeys(), RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, Path::disabled_nodes, lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, PATHKEYS_EQUAL, RelOptInfo::pathlist, Path::startup_cost, STD_FUZZ_FACTOR, and unlikely.

Referenced by add_partial_path_precheck(), try_hashjoin_path(), try_mergejoin_path(), and try_nestloop_path().
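Join path builders use this precheck to avoid constructing path nodes that add_path() would reject anyway. A hedged fragment of that pattern, with the surrounding variables (a JoinCostWorkspace filled by initial_cost_hashjoin(), plus required_outer, restrict_clauses, hashclauses, and so on) assumed to be set up as in try_hashjoin_path():

    if (add_path_precheck(joinrel, workspace.disabled_nodes,
                          workspace.startup_cost, workspace.total_cost,
                          NIL,  /* hash join paths never have pathkeys */
                          required_outer))
    {
        add_path(joinrel, (Path *)
                 create_hashjoin_path(root, joinrel, jointype,
                                      &workspace, extra,
                                      outer_path, inner_path,
                                      false,    /* parallel_hash */
                                      restrict_clauses,
                                      required_outer, hashclauses));
    }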

◆ adjust_limit_rows_costs()

void adjust_limit_rows_costs ( double *  rows,
Cost * startup_cost,
Cost * total_cost,
int64  offset_est,
int64  count_est 
)

Definition at line 3785 of file pathnode.c.

3790{
3791 double input_rows = *rows;
3792 Cost input_startup_cost = *startup_cost;
3793 Cost input_total_cost = *total_cost;
3794
3795 if (offset_est != 0)
3796 {
3797 double offset_rows;
3798
3799 if (offset_est > 0)
3800 offset_rows = (double) offset_est;
3801 else
3802 offset_rows = clamp_row_est(input_rows * 0.10);
3803 if (offset_rows > *rows)
3804 offset_rows = *rows;
3805 if (input_rows > 0)
3806 *startup_cost +=
3807 (input_total_cost - input_startup_cost)
3808 * offset_rows / input_rows;
3809 *rows -= offset_rows;
3810 if (*rows < 1)
3811 *rows = 1;
3812 }
3813
3814 if (count_est != 0)
3815 {
3816 double count_rows;
3817
3818 if (count_est > 0)
3819 count_rows = (double) count_est;
3820 else
3821 count_rows = clamp_row_est(input_rows * 0.10);
3822 if (count_rows > *rows)
3823 count_rows = *rows;
3824 if (input_rows > 0)
3825 *total_cost = *startup_cost +
3826 (input_total_cost - input_startup_cost)
3827 * count_rows / input_rows;
3828 *rows = count_rows;
3829 if (*rows < 1)
3830 *rows = 1;
3831 }
3832}
double clamp_row_est(double nrows)
Definition: costsize.c:213
double Cost
Definition: nodes.h:261

References clamp_row_est().

Referenced by create_limit_path(), and estimate_path_cost_size().
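A small worked example with hypothetical numbers, showing how the OFFSET and LIMIT adjustments interact when both are estimable at plan time:

    double  rows = 1000;
    Cost    startup_cost = 0.0;
    Cost    total_cost = 100.0;

    /* LIMIT 10 OFFSET 100 */
    adjust_limit_rows_costs(&rows, &startup_cost, &total_cost, 100, 10);

    /*
     * Afterwards: rows = 10, startup_cost = 10.0 (the fraction of the input
     * cost charged for skipping the first 100 of 1000 rows), and
     * total_cost = 11.0 (that startup cost plus the incremental cost of
     * fetching 10 more rows).
     */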

◆ apply_projection_to_path()

Path * apply_projection_to_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * path,
PathTarget * target 
)

Definition at line 2633 of file pathnode.c.

2637{
2638 QualCost oldcost;
2639
2640 /*
2641 * If given path can't project, we might need a Result node, so make a
2642 * separate ProjectionPath.
2643 */
2644 if (!is_projection_capable_path(path))
2645 return (Path *) create_projection_path(root, rel, path, target);
2646
2647 /*
2648 * We can just jam the desired tlist into the existing path, being sure to
2649 * update its cost estimates appropriately.
2650 */
2651 oldcost = path->pathtarget->cost;
2652 path->pathtarget = target;
2653
2654 path->startup_cost += target->cost.startup - oldcost.startup;
2655 path->total_cost += target->cost.startup - oldcost.startup +
2656 (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2657
2658 /*
2659 * If the path happens to be a Gather or GatherMerge path, we'd like to
2660 * arrange for the subpath to return the required target list so that
2661 * workers can help project. But if there is something that is not
2662 * parallel-safe in the target expressions, then we can't.
2663 */
2664 if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2665 is_parallel_safe(root, (Node *) target->exprs))
2666 {
2667 /*
2668 * We always use create_projection_path here, even if the subpath is
2669 * projection-capable, so as to avoid modifying the subpath in place.
2670 * It seems unlikely at present that there could be any other
2671 * references to the subpath, but better safe than sorry.
2672 *
2673 * Note that we don't change the parallel path's cost estimates; it
2674 * might be appropriate to do so, to reflect the fact that the bulk of
2675 * the target evaluation will happen in workers.
2676 */
2677 if (IsA(path, GatherPath))
2678 {
2679 GatherPath *gpath = (GatherPath *) path;
2680
2681 gpath->subpath = (Path *)
2682 create_projection_path(root,
2683 gpath->subpath->parent,
2684 gpath->subpath,
2685 target);
2686 }
2687 else
2688 {
2689 GatherMergePath *gmpath = (GatherMergePath *) path;
2690
2691 gmpath->subpath = (Path *)
2692 create_projection_path(root,
2693 gmpath->subpath->parent,
2694 gmpath->subpath,
2695 target);
2696 }
2697 }
2698 else if (path->parallel_safe &&
2699 !is_parallel_safe(root, (Node *) target->exprs))
2700 {
2701 /*
2702 * We're inserting a parallel-restricted target list into a path
2703 * currently marked parallel-safe, so we have to mark it as no longer
2704 * safe.
2705 */
2706 path->parallel_safe = false;
2707 }
2708
2709 return path;
2710}
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:763
bool is_projection_capable_path(Path *path)
Definition: createplan.c:7217
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2524
tree ctl root
Definition: radixtree.h:1857
Path * subpath
Definition: pathnodes.h:2264
Definition: nodes.h:135
List * exprs
Definition: pathnodes.h:1780
QualCost cost
Definition: pathnodes.h:1786
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47

References PathTarget::cost, create_projection_path(), PathTarget::exprs, if(), is_parallel_safe(), is_projection_capable_path(), IsA, Path::parallel_safe, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, GatherPath::subpath, GatherMergePath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs(), build_minmax_path(), create_ordered_paths(), and recurse_set_operations().
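A hedged fragment of the typical call: attaching a new PathTarget to a path the caller has just built. As the code above shows, the given path may be modified in place, so this should only be used on a path that is not referenced anywhere else; create_projection_path() is the non-destructive alternative.

    /* 'path' was just built by this caller; 'target' is the tlist now required */
    path = apply_projection_to_path(root, rel, path, target);
    add_path(rel, path);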

◆ build_child_join_rel()

RelOptInfo * build_child_join_rel ( PlannerInfo * root,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
RelOptInfo * parent_joinrel,
List * restrictlist,
SpecialJoinInfo * sjinfo,
int nappinfos,
AppendRelInfo ** appinfos 
)

Definition at line 1001 of file relnode.c.

1005{
1006 RelOptInfo *joinrel = makeNode(RelOptInfo);
1007
1008 /* Only joins between "other" relations land here. */
1009 Assert(IS_OTHER_REL(outer_rel) && IS_OTHER_REL(inner_rel));
1010
1011 /* The parent joinrel should have consider_partitionwise_join set. */
1012 Assert(parent_joinrel->consider_partitionwise_join);
1013
1014 joinrel->reloptkind = RELOPT_OTHER_JOINREL;
1015 joinrel->relids = adjust_child_relids(parent_joinrel->relids,
1016 nappinfos, appinfos);
1017 joinrel->rows = 0;
1018 /* cheap startup cost is interesting iff not all tuples to be retrieved */
1019 joinrel->consider_startup = (root->tuple_fraction > 0);
1020 joinrel->consider_param_startup = false;
1021 joinrel->consider_parallel = false;
1022 joinrel->reltarget = create_empty_pathtarget();
1023 joinrel->pathlist = NIL;
1024 joinrel->ppilist = NIL;
1025 joinrel->partial_pathlist = NIL;
1026 joinrel->cheapest_startup_path = NULL;
1027 joinrel->cheapest_total_path = NULL;
1028 joinrel->cheapest_parameterized_paths = NIL;
1029 joinrel->direct_lateral_relids = NULL;
1030 joinrel->lateral_relids = NULL;
1031 joinrel->relid = 0; /* indicates not a baserel */
1032 joinrel->rtekind = RTE_JOIN;
1033 joinrel->min_attr = 0;
1034 joinrel->max_attr = 0;
1035 joinrel->attr_needed = NULL;
1036 joinrel->attr_widths = NULL;
1037 joinrel->notnullattnums = NULL;
1038 joinrel->nulling_relids = NULL;
1039 joinrel->lateral_vars = NIL;
1040 joinrel->lateral_referencers = NULL;
1041 joinrel->indexlist = NIL;
1042 joinrel->pages = 0;
1043 joinrel->tuples = 0;
1044 joinrel->allvisfrac = 0;
1045 joinrel->eclass_indexes = NULL;
1046 joinrel->subroot = NULL;
1047 joinrel->subplan_params = NIL;
1048 joinrel->amflags = 0;
1049 joinrel->serverid = InvalidOid;
1050 joinrel->userid = InvalidOid;
1051 joinrel->useridiscurrent = false;
1052 joinrel->fdwroutine = NULL;
1053 joinrel->fdw_private = NULL;
1054 joinrel->unique_rel = NULL;
1055 joinrel->unique_pathkeys = NIL;
1056 joinrel->unique_groupclause = NIL;
1057 joinrel->baserestrictinfo = NIL;
1058 joinrel->baserestrictcost.startup = 0;
1059 joinrel->baserestrictcost.per_tuple = 0;
1060 joinrel->joininfo = NIL;
1061 joinrel->has_eclass_joins = false;
1062 joinrel->consider_partitionwise_join = false; /* might get changed later */
1063 joinrel->agg_info = NULL;
1064 joinrel->grouped_rel = NULL;
1065 joinrel->parent = parent_joinrel;
1066 joinrel->top_parent = parent_joinrel->top_parent ? parent_joinrel->top_parent : parent_joinrel;
1067 joinrel->top_parent_relids = joinrel->top_parent->relids;
1068 joinrel->part_scheme = NULL;
1069 joinrel->nparts = -1;
1070 joinrel->boundinfo = NULL;
1071 joinrel->partbounds_merged = false;
1072 joinrel->partition_qual = NIL;
1073 joinrel->part_rels = NULL;
1074 joinrel->live_parts = NULL;
1075 joinrel->all_partrels = NULL;
1076 joinrel->partexprs = NULL;
1077 joinrel->nullable_partexprs = NULL;
1078
1079 /* Compute information relevant to foreign relations. */
1080 set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
1081
1082 /* Set up reltarget struct */
1083 build_child_join_reltarget(root, parent_joinrel, joinrel,
1084 nappinfos, appinfos);
1085
1086 /* Construct joininfo list. */
1087 joinrel->joininfo = (List *) adjust_appendrel_attrs(root,
1088 (Node *) parent_joinrel->joininfo,
1089 nappinfos,
1090 appinfos);
1091
1092 /*
1093 * Lateral relids referred in child join will be same as that referred in
1094 * the parent relation.
1095 */
1096 joinrel->direct_lateral_relids = (Relids) bms_copy(parent_joinrel->direct_lateral_relids);
1097 joinrel->lateral_relids = (Relids) bms_copy(parent_joinrel->lateral_relids);
1098
1099 /*
1100 * If the parent joinrel has pending equivalence classes, so does the
1101 * child.
1102 */
1103 joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins;
1104
1105 /* Is the join between partitions itself partitioned? */
1106 build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
1107 restrictlist);
1108
1109 /* Child joinrel is parallel safe if parent is parallel safe. */
1110 joinrel->consider_parallel = parent_joinrel->consider_parallel;
1111
1112 /* Set estimates of the child-joinrel's size. */
1113 set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
1114 sjinfo, restrictlist);
1115
1116 /* We build the join only once. */
1117 Assert(!find_join_rel(root, joinrel->relids));
1118
1119 /* Add the relation to the PlannerInfo. */
1120 add_join_rel(root, joinrel);
1121
1122 /*
1123 * We might need EquivalenceClass members corresponding to the child join,
1124 * so that we can represent sort pathkeys for it. As with children of
1125 * baserels, we shouldn't need this unless there are relevant eclass joins
1126 * (implying that a merge join might be possible) or pathkeys to sort by.
1127 */
1128 if (joinrel->has_eclass_joins || has_useful_pathkeys(root, parent_joinrel))
1129 add_child_join_rel_equivalences(root,
1130 nappinfos, appinfos,
1131 parent_joinrel, joinrel);
1132
1133 return joinrel;
1134}
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
Relids adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:625
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: costsize.c:5437
void add_child_join_rel_equivalences(PlannerInfo *root, int nappinfos, AppendRelInfo **appinfos, RelOptInfo *parent_joinrel, RelOptInfo *child_joinrel)
Definition: equivclass.c:2941
#define makeNode(_type_)
Definition: nodes.h:161
@ RTE_JOIN
Definition: parsenodes.h:1045
bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel)
Definition: pathkeys.c:2291
Bitmapset * Relids
Definition: pathnodes.h:30
@ RELOPT_OTHER_JOINREL
Definition: pathnodes.h:886
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:910
#define InvalidOid
Definition: postgres_ext.h:37
static void build_joinrel_partition_info(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo, List *restrictlist)
Definition: relnode.c:2113
RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids)
Definition: relnode.c:642
static void build_child_join_reltarget(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, int nappinfos, AppendRelInfo **appinfos)
Definition: relnode.c:2626
static void set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:704
static void add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
Definition: relnode.c:742
List * baserestrictinfo
Definition: pathnodes.h:1046
List * subplan_params
Definition: pathnodes.h:1005
List * ppilist
Definition: pathnodes.h:955
bool useridiscurrent
Definition: pathnodes.h:1019
uint32 amflags
Definition: pathnodes.h:1009
List * joininfo
Definition: pathnodes.h:1052
Bitmapset * notnullattnums
Definition: pathnodes.h:987
List * partition_qual
Definition: pathnodes.h:1096
Relids relids
Definition: pathnodes.h:927
struct PathTarget * reltarget
Definition: pathnodes.h:949
Index relid
Definition: pathnodes.h:973
List * lateral_vars
Definition: pathnodes.h:991
struct RelAggInfo * agg_info
Definition: pathnodes.h:1066
List * unique_pathkeys
Definition: pathnodes.h:1038
Cardinality tuples
Definition: pathnodes.h:1000
Relids top_parent_relids
Definition: pathnodes.h:1078
bool partbounds_merged
Definition: pathnodes.h:1094
BlockNumber pages
Definition: pathnodes.h:999
Relids lateral_relids
Definition: pathnodes.h:968
List * cheapest_parameterized_paths
Definition: pathnodes.h:959
RelOptKind reloptkind
Definition: pathnodes.h:921
List * indexlist
Definition: pathnodes.h:995
Relids lateral_referencers
Definition: pathnodes.h:993
struct Path * cheapest_startup_path
Definition: pathnodes.h:957
QualCost baserestrictcost
Definition: pathnodes.h:1048
struct Path * cheapest_total_path
Definition: pathnodes.h:958
List * unique_groupclause
Definition: pathnodes.h:1040
struct RelOptInfo * grouped_rel
Definition: pathnodes.h:1068
Bitmapset * eclass_indexes
Definition: pathnodes.h:1003
Relids all_partrels
Definition: pathnodes.h:1110
Relids direct_lateral_relids
Definition: pathnodes.h:966
bool has_eclass_joins
Definition: pathnodes.h:1054
Oid serverid
Definition: pathnodes.h:1015
Bitmapset * live_parts
Definition: pathnodes.h:1108
bool consider_partitionwise_join
Definition: pathnodes.h:1060
PlannerInfo * subroot
Definition: pathnodes.h:1004
AttrNumber max_attr
Definition: pathnodes.h:981
Relids nulling_relids
Definition: pathnodes.h:989
double allvisfrac
Definition: pathnodes.h:1001
struct RelOptInfo * unique_rel
Definition: pathnodes.h:1036
Cardinality rows
Definition: pathnodes.h:933
AttrNumber min_attr
Definition: pathnodes.h:979
RTEKind rtekind
Definition: pathnodes.h:977
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681

References add_child_join_rel_equivalences(), add_join_rel(), adjust_appendrel_attrs(), adjust_child_relids(), RelOptInfo::agg_info, RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert(), RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), build_child_join_reltarget(), build_joinrel_partition_info(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, find_join_rel(), RelOptInfo::grouped_rel, RelOptInfo::has_eclass_joins, has_useful_pathkeys(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::relid, RelOptInfo::relids, RELOPT_OTHER_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_groupclause, RelOptInfo::unique_pathkeys, RelOptInfo::unique_rel, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by try_partitionwise_join().
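For context, a hedged fragment of how the caller obtains the AppendRelInfo mappings and builds a child joinrel. find_appinfos_by_relids() comes from the planner's appendinfo module, and the child_* variables are assumed to be the translated child inputs, as in try_partitionwise_join():

    int         nappinfos;
    AppendRelInfo **appinfos;
    RelOptInfo *child_joinrel;

    appinfos = find_appinfos_by_relids(root,
                                       bms_union(child_outer->relids,
                                                 child_inner->relids),
                                       &nappinfos);
    child_joinrel = build_child_join_rel(root, child_outer, child_inner,
                                         parent_joinrel, child_restrictlist,
                                         child_sjinfo, nappinfos, appinfos);
    pfree(appinfos);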

◆ build_grouped_rel()

RelOptInfo * build_grouped_rel ( PlannerInfo * root,
RelOptInfo * rel 
)

Definition at line 484 of file relnode.c.

485{
486 RelOptInfo *grouped_rel;
487
488 grouped_rel = makeNode(RelOptInfo);
489 memcpy(grouped_rel, rel, sizeof(RelOptInfo));
490
491 /*
492 * clear path info
493 */
494 grouped_rel->pathlist = NIL;
495 grouped_rel->ppilist = NIL;
496 grouped_rel->partial_pathlist = NIL;
497 grouped_rel->cheapest_startup_path = NULL;
498 grouped_rel->cheapest_total_path = NULL;
499 grouped_rel->cheapest_parameterized_paths = NIL;
500
501 /*
502 * clear partition info
503 */
504 grouped_rel->part_scheme = NULL;
505 grouped_rel->nparts = -1;
506 grouped_rel->boundinfo = NULL;
507 grouped_rel->partbounds_merged = false;
508 grouped_rel->partition_qual = NIL;
509 grouped_rel->part_rels = NULL;
510 grouped_rel->live_parts = NULL;
511 grouped_rel->all_partrels = NULL;
512 grouped_rel->partexprs = NULL;
513 grouped_rel->nullable_partexprs = NULL;
514 grouped_rel->consider_partitionwise_join = false;
515
516 /*
517 * clear size estimates
518 */
519 grouped_rel->rows = 0;
520
521 return grouped_rel;
522}

References RelOptInfo::all_partrels, RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::consider_partitionwise_join, RelOptInfo::live_parts, makeNode, NIL, RelOptInfo::nparts, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, RelOptInfo::ppilist, and RelOptInfo::rows.

Referenced by build_simple_grouped_rel(), and make_grouped_join_rel().

◆ build_join_rel()

RelOptInfo * build_join_rel ( PlannerInfo * root,
Relids joinrelids,
RelOptInfo * outer_rel,
RelOptInfo * inner_rel,
SpecialJoinInfo * sjinfo,
List * pushed_down_joins,
List ** restrictlist_ptr 
)

Definition at line 780 of file relnode.c.

787{
788 RelOptInfo *joinrel;
789 List *restrictlist;
790
791 /* This function should be used only for join between parents. */
792 Assert(!IS_OTHER_REL(outer_rel) && !IS_OTHER_REL(inner_rel));
793
794 /*
795 * See if we already have a joinrel for this set of base rels.
796 */
797 joinrel = find_join_rel(root, joinrelids);
798
799 if (joinrel)
800 {
801 /*
802 * Yes, so we only need to figure the restrictlist for this particular
803 * pair of component relations.
804 */
805 if (restrictlist_ptr)
806 *restrictlist_ptr = build_joinrel_restrictlist(root,
807 joinrel,
808 outer_rel,
809 inner_rel,
810 sjinfo);
811 return joinrel;
812 }
813
814 /*
815 * Nope, so make one.
816 */
817 joinrel = makeNode(RelOptInfo);
818 joinrel->reloptkind = RELOPT_JOINREL;
819 joinrel->relids = bms_copy(joinrelids);
820 joinrel->rows = 0;
821 /* cheap startup cost is interesting iff not all tuples to be retrieved */
822 joinrel->consider_startup = (root->tuple_fraction > 0);
823 joinrel->consider_param_startup = false;
824 joinrel->consider_parallel = false;
825 joinrel->reltarget = create_empty_pathtarget();
826 joinrel->pathlist = NIL;
827 joinrel->ppilist = NIL;
828 joinrel->partial_pathlist = NIL;
829 joinrel->cheapest_startup_path = NULL;
830 joinrel->cheapest_total_path = NULL;
831 joinrel->cheapest_parameterized_paths = NIL;
832 /* init direct_lateral_relids from children; we'll finish it up below */
833 joinrel->direct_lateral_relids =
834 bms_union(outer_rel->direct_lateral_relids,
835 inner_rel->direct_lateral_relids);
836 joinrel->lateral_relids = min_join_parameterization(root, joinrelids,
837 outer_rel, inner_rel);
838 joinrel->relid = 0; /* indicates not a baserel */
839 joinrel->rtekind = RTE_JOIN;
840 joinrel->min_attr = 0;
841 joinrel->max_attr = 0;
842 joinrel->attr_needed = NULL;
843 joinrel->attr_widths = NULL;
844 joinrel->notnullattnums = NULL;
845 joinrel->nulling_relids = NULL;
846 joinrel->lateral_vars = NIL;
847 joinrel->lateral_referencers = NULL;
848 joinrel->indexlist = NIL;
849 joinrel->statlist = NIL;
850 joinrel->pages = 0;
851 joinrel->tuples = 0;
852 joinrel->allvisfrac = 0;
853 joinrel->eclass_indexes = NULL;
854 joinrel->subroot = NULL;
855 joinrel->subplan_params = NIL;
856 joinrel->rel_parallel_workers = -1;
857 joinrel->amflags = 0;
858 joinrel->serverid = InvalidOid;
859 joinrel->userid = InvalidOid;
860 joinrel->useridiscurrent = false;
861 joinrel->fdwroutine = NULL;
862 joinrel->fdw_private = NULL;
863 joinrel->unique_for_rels = NIL;
864 joinrel->non_unique_for_rels = NIL;
865 joinrel->unique_rel = NULL;
866 joinrel->unique_pathkeys = NIL;
867 joinrel->unique_groupclause = NIL;
868 joinrel->baserestrictinfo = NIL;
869 joinrel->baserestrictcost.startup = 0;
870 joinrel->baserestrictcost.per_tuple = 0;
871 joinrel->baserestrict_min_security = UINT_MAX;
872 joinrel->joininfo = NIL;
873 joinrel->has_eclass_joins = false;
874 joinrel->consider_partitionwise_join = false; /* might get changed later */
875 joinrel->agg_info = NULL;
876 joinrel->grouped_rel = NULL;
877 joinrel->parent = NULL;
878 joinrel->top_parent = NULL;
879 joinrel->top_parent_relids = NULL;
880 joinrel->part_scheme = NULL;
881 joinrel->nparts = -1;
882 joinrel->boundinfo = NULL;
883 joinrel->partbounds_merged = false;
884 joinrel->partition_qual = NIL;
885 joinrel->part_rels = NULL;
886 joinrel->live_parts = NULL;
887 joinrel->all_partrels = NULL;
888 joinrel->partexprs = NULL;
889 joinrel->nullable_partexprs = NULL;
890
891 /* Compute information relevant to the foreign relations. */
892 set_foreign_rel_properties(joinrel, outer_rel, inner_rel);
893
894 /*
895 * Fill the joinrel's tlist with just the Vars and PHVs that need to be
896 * output from this join (ie, are needed for higher joinclauses or final
897 * output).
898 *
899 * NOTE: the tlist order for a join rel will depend on which pair of outer
900 * and inner rels we first try to build it from. But the contents should
901 * be the same regardless.
902 */
903 build_joinrel_tlist(root, joinrel, outer_rel, sjinfo, pushed_down_joins,
904 (sjinfo->jointype == JOIN_FULL));
905 build_joinrel_tlist(root, joinrel, inner_rel, sjinfo, pushed_down_joins,
906 (sjinfo->jointype != JOIN_INNER));
907 add_placeholders_to_joinrel(root, joinrel, outer_rel, inner_rel, sjinfo);
908
909 /*
910 * add_placeholders_to_joinrel also took care of adding the ph_lateral
911 * sets of any PlaceHolderVars computed here to direct_lateral_relids, so
912 * now we can finish computing that. This is much like the computation of
913 * the transitively-closed lateral_relids in min_join_parameterization,
914 * except that here we *do* have to consider the added PHVs.
915 */
916 joinrel->direct_lateral_relids =
917 bms_del_members(joinrel->direct_lateral_relids, joinrel->relids);
918
919 /*
920 * Construct restrict and join clause lists for the new joinrel. (The
921 * caller might or might not need the restrictlist, but I need it anyway
922 * for set_joinrel_size_estimates().)
923 */
924 restrictlist = build_joinrel_restrictlist(root, joinrel,
925 outer_rel, inner_rel,
926 sjinfo);
927 if (restrictlist_ptr)
928 *restrictlist_ptr = restrictlist;
929 build_joinrel_joinlist(joinrel, outer_rel, inner_rel);
930
931 /*
932 * This is also the right place to check whether the joinrel has any
933 * pending EquivalenceClass joins.
934 */
935 joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel);
936
937 /* Store the partition information. */
938 build_joinrel_partition_info(root, joinrel, outer_rel, inner_rel, sjinfo,
939 restrictlist);
940
941 /*
942 * Set estimates of the joinrel's size.
943 */
944 set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel,
945 sjinfo, restrictlist);
946
947 /*
948 * Set the consider_parallel flag if this joinrel could potentially be
949 * scanned within a parallel worker. If this flag is false for either
950 * inner_rel or outer_rel, then it must be false for the joinrel also.
951 * Even if both are true, there might be parallel-restricted expressions
952 * in the targetlist or quals.
953 *
954 * Note that if there are more than two rels in this relation, they could
955 * be divided between inner_rel and outer_rel in any arbitrary way. We
956 * assume this doesn't matter, because we should hit all the same baserels
957 * and joinclauses while building up to this joinrel no matter which we
958 * take; therefore, we should make the same decision here however we get
959 * here.
960 */
961 if (inner_rel->consider_parallel && outer_rel->consider_parallel &&
962 is_parallel_safe(root, (Node *) restrictlist) &&
963 is_parallel_safe(root, (Node *) joinrel->reltarget->exprs))
964 joinrel->consider_parallel = true;
965
966 /* Add the joinrel to the PlannerInfo. */
967 add_join_rel(root, joinrel);
968
969 /*
970 * Also, if dynamic-programming join search is active, add the new joinrel
971 * to the appropriate sublist. Note: you might think the Assert on number
972 * of members should be for equality, but some of the level 1 rels might
973 * have been joinrels already, so we can only assert <=.
974 */
975 if (root->join_rel_level)
976 {
977 Assert(root->join_cur_level > 0);
978 Assert(root->join_cur_level <= bms_num_members(joinrel->relids));
979 root->join_rel_level[root->join_cur_level] =
980 lappend(root->join_rel_level[root->join_cur_level], joinrel);
981 }
982
983 return joinrel;
984}
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
Bitmapset * bms_union(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:251
bool has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1)
Definition: equivclass.c:3446
List * lappend(List *list, void *datum)
Definition: list.c:339
@ JOIN_FULL
Definition: nodes.h:305
@ JOIN_INNER
Definition: nodes.h:303
@ RELOPT_JOINREL
Definition: pathnodes.h:884
void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: placeholder.c:400
static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *input_rel, SpecialJoinInfo *sjinfo, List *pushed_down_joins, bool can_null)
Definition: relnode.c:1223
Relids min_join_parameterization(PlannerInfo *root, Relids joinrelids, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1145
static List * build_joinrel_restrictlist(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: relnode.c:1408
static void build_joinrel_joinlist(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
Definition: relnode.c:1445
List * statlist
Definition: pathnodes.h:997
List * unique_for_rels
Definition: pathnodes.h:1028
List * non_unique_for_rels
Definition: pathnodes.h:1030
int rel_parallel_workers
Definition: pathnodes.h:1007
Index baserestrict_min_security
Definition: pathnodes.h:1050
JoinType jointype
Definition: pathnodes.h:3121

References add_join_rel(), add_placeholders_to_joinrel(), RelOptInfo::agg_info, RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, Assert(), RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_copy(), bms_del_members(), bms_num_members(), bms_union(), build_joinrel_joinlist(), build_joinrel_partition_info(), build_joinrel_restrictlist(), build_joinrel_tlist(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, PathTarget::exprs, find_join_rel(), RelOptInfo::grouped_rel, RelOptInfo::has_eclass_joins, has_relevant_eclass_joinclause(), RelOptInfo::indexlist, InvalidOid, IS_OTHER_REL, is_parallel_safe(), JOIN_FULL, JOIN_INNER, RelOptInfo::joininfo, SpecialJoinInfo::jointype, lappend(), RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, RelOptInfo::live_parts, makeNode, RelOptInfo::max_attr, RelOptInfo::min_attr, min_join_parameterization(), NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_JOINREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_JOIN, RelOptInfo::rtekind, RelOptInfo::serverid, set_foreign_rel_properties(), set_joinrel_size_estimates(), QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::unique_groupclause, RelOptInfo::unique_pathkeys, RelOptInfo::unique_rel, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by make_join_rel().

◆ build_simple_grouped_rel()

RelOptInfo * build_simple_grouped_rel ( PlannerInfo *root,
RelOptInfo *rel 
)

Definition at line 433 of file relnode.c.

434{
435 RelOptInfo *grouped_rel;
436 RelAggInfo *agg_info;
437
438 /*
439 * We should have available aggregate expressions and grouping
440 * expressions, otherwise we cannot reach here.
441 */
442 Assert(root->agg_clause_list != NIL);
443 Assert(root->group_expr_list != NIL);
444
445 /* nothing to do for dummy rel */
446 if (IS_DUMMY_REL(rel))
447 return NULL;
448
449 /*
450 * Prepare the information needed to create grouped paths for this simple
451 * relation.
452 */
453 agg_info = create_rel_agg_info(root, rel, true);
454 if (agg_info == NULL)
455 return NULL;
456
457 /*
458 * If grouped paths for the given simple relation are not considered
459 * useful, skip building the grouped relation.
460 */
461 if (!agg_info->agg_useful)
462 return NULL;
463
464 /* Track the set of relids at which partial aggregation is applied */
465 agg_info->apply_agg_at = bms_copy(rel->relids);
466
467 /* build the grouped relation */
468 grouped_rel = build_grouped_rel(root, rel);
469 grouped_rel->reltarget = agg_info->target;
470 grouped_rel->rows = agg_info->grouped_rows;
471 grouped_rel->agg_info = agg_info;
472
473 rel->grouped_rel = grouped_rel;
474
475 return grouped_rel;
476}
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:2194
RelOptInfo * build_grouped_rel(PlannerInfo *root, RelOptInfo *rel)
Definition: relnode.c:484
RelAggInfo * create_rel_agg_info(PlannerInfo *root, RelOptInfo *rel, bool calculate_grouped_rows)
Definition: relnode.c:2655
Relids apply_agg_at
Definition: pathnodes.h:1206
bool agg_useful
Definition: pathnodes.h:1212
Cardinality grouped_rows
Definition: pathnodes.h:1209
struct PathTarget * target
Definition: pathnodes.h:1195

References RelOptInfo::agg_info, RelAggInfo::agg_useful, RelAggInfo::apply_agg_at, Assert(), bms_copy(), build_grouped_rel(), create_rel_agg_info(), RelOptInfo::grouped_rel, RelAggInfo::grouped_rows, IS_DUMMY_REL, NIL, RelOptInfo::relids, RelOptInfo::reltarget, root, RelOptInfo::rows, and RelAggInfo::target.

Referenced by setup_simple_grouped_rels().
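
As a usage illustration only (not taken from the PostgreSQL sources): a caller in the spirit of setup_simple_grouped_rels() could walk the simple-rel array and request a grouped counterpart for each base relation. The helper name build_grouped_base_rels() is hypothetical, and the sketch assumes the query really contains aggregate and grouping expressions, since build_simple_grouped_rel() Asserts that.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical sketch: try to attach a grouped rel to every plain baserel. */
static void
build_grouped_base_rels(PlannerInfo *root)
{
    for (int rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RelOptInfo *rel = root->simple_rel_array[rti];

        /* skip empty array slots and anything that is not a plain baserel */
        if (rel == NULL || rel->reloptkind != RELOPT_BASEREL)
            continue;

        /* returns NULL when grouped paths are impossible or not useful */
        (void) build_simple_grouped_rel(root, rel);
    }
}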

◆ build_simple_rel()

RelOptInfo * build_simple_rel ( PlannerInfo *root,
int  relid,
RelOptInfo *parent 
)

Definition at line 206 of file relnode.c.

207{
208 RelOptInfo *rel;
209 RangeTblEntry *rte;
210
211 /* Rel should not exist already */
212 Assert(relid > 0 && relid < root->simple_rel_array_size);
213 if (root->simple_rel_array[relid] != NULL)
214 elog(ERROR, "rel %d already exists", relid);
215
216 /* Fetch RTE for relation */
217 rte = root->simple_rte_array[relid];
218 Assert(rte != NULL);
219
220 rel = makeNode(RelOptInfo);
221 rel->reloptkind = parent ? RELOPT_OTHER_MEMBER_REL : RELOPT_BASEREL;
222 rel->relids = bms_make_singleton(relid);
223 rel->rows = 0;
224 /* cheap startup cost is interesting iff not all tuples to be retrieved */
225 rel->consider_startup = (root->tuple_fraction > 0);
226 rel->consider_param_startup = false; /* might get changed later */
227 rel->consider_parallel = false; /* might get changed later */
228 rel->reltarget = create_empty_pathtarget();
229 rel->pathlist = NIL;
230 rel->ppilist = NIL;
231 rel->partial_pathlist = NIL;
232 rel->cheapest_startup_path = NULL;
233 rel->cheapest_total_path = NULL;
234 rel->cheapest_parameterized_paths = NIL;
235 rel->relid = relid;
236 rel->rtekind = rte->rtekind;
237 /* min_attr, max_attr, attr_needed, attr_widths are set below */
238 rel->notnullattnums = NULL;
239 rel->lateral_vars = NIL;
240 rel->indexlist = NIL;
241 rel->statlist = NIL;
242 rel->pages = 0;
243 rel->tuples = 0;
244 rel->allvisfrac = 0;
245 rel->eclass_indexes = NULL;
246 rel->subroot = NULL;
247 rel->subplan_params = NIL;
248 rel->rel_parallel_workers = -1; /* set up in get_relation_info */
249 rel->amflags = 0;
250 rel->serverid = InvalidOid;
251 if (rte->rtekind == RTE_RELATION)
252 {
253 Assert(parent == NULL ||
254 parent->rtekind == RTE_RELATION ||
255 parent->rtekind == RTE_SUBQUERY);
256
257 /*
258 * For any RELATION rte, we need a userid with which to check
259 * permission access. Baserels simply use their own
260 * RTEPermissionInfo's checkAsUser.
261 *
262 * For otherrels normally there's no RTEPermissionInfo, so we use the
263 * parent's, which normally has one. The exceptional case is that the
264 * parent is a subquery, in which case the otherrel will have its own.
265 */
266 if (rel->reloptkind == RELOPT_BASEREL ||
267 (rel->reloptkind == RELOPT_OTHER_MEMBER_REL &&
268 parent->rtekind == RTE_SUBQUERY))
269 {
270 RTEPermissionInfo *perminfo;
271
272 perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
273 rel->userid = perminfo->checkAsUser;
274 }
275 else
276 rel->userid = parent->userid;
277 }
278 else
279 rel->userid = InvalidOid;
280 rel->useridiscurrent = false;
281 rel->fdwroutine = NULL;
282 rel->fdw_private = NULL;
283 rel->unique_for_rels = NIL;
284 rel->non_unique_for_rels = NIL;
285 rel->unique_rel = NULL;
286 rel->unique_pathkeys = NIL;
287 rel->unique_groupclause = NIL;
288 rel->baserestrictinfo = NIL;
289 rel->baserestrictcost.startup = 0;
290 rel->baserestrictcost.per_tuple = 0;
291 rel->baserestrict_min_security = UINT_MAX;
292 rel->joininfo = NIL;
293 rel->has_eclass_joins = false;
294 rel->consider_partitionwise_join = false; /* might get changed later */
295 rel->agg_info = NULL;
296 rel->grouped_rel = NULL;
297 rel->part_scheme = NULL;
298 rel->nparts = -1;
299 rel->boundinfo = NULL;
300 rel->partbounds_merged = false;
301 rel->partition_qual = NIL;
302 rel->part_rels = NULL;
303 rel->live_parts = NULL;
304 rel->all_partrels = NULL;
305 rel->partexprs = NULL;
306 rel->nullable_partexprs = NULL;
307
308 /*
309 * Pass assorted information down the inheritance hierarchy.
310 */
311 if (parent)
312 {
313 /* We keep back-links to immediate parent and topmost parent. */
314 rel->parent = parent;
315 rel->top_parent = parent->top_parent ? parent->top_parent : parent;
316 rel->top_parent_relids = rel->top_parent->relids;
317
318 /*
319 * A child rel is below the same outer joins as its parent. (We
320 * presume this info was already calculated for the parent.)
321 */
322 rel->nulling_relids = parent->nulling_relids;
323
324 /*
325 * Also propagate lateral-reference information from appendrel parent
326 * rels to their child rels. We intentionally give each child rel the
327 * same minimum parameterization, even though it's quite possible that
328 * some don't reference all the lateral rels. This is because any
329 * append path for the parent will have to have the same
330 * parameterization for every child anyway, and there's no value in
331 * forcing extra reparameterize_path() calls. Similarly, a lateral
332 * reference to the parent prevents use of otherwise-movable join rels
333 * for each child.
334 *
335 * It's possible for child rels to have their own children, in which
336 * case the topmost parent's lateral info propagates all the way down.
337 */
338 rel->direct_lateral_relids = parent->direct_lateral_relids;
339 rel->lateral_relids = parent->lateral_relids;
340 rel->lateral_referencers = parent->lateral_referencers;
341 }
342 else
343 {
344 rel->parent = NULL;
345 rel->top_parent = NULL;
346 rel->top_parent_relids = NULL;
347 rel->nulling_relids = NULL;
348 rel->direct_lateral_relids = NULL;
349 rel->lateral_relids = NULL;
350 rel->lateral_referencers = NULL;
351 }
352
353 /* Check type of rtable entry */
354 switch (rte->rtekind)
355 {
356 case RTE_RELATION:
357 /* Table --- retrieve statistics from the system catalogs */
358 get_relation_info(root, rte->relid, rte->inh, rel);
359 break;
360 case RTE_SUBQUERY:
361 case RTE_FUNCTION:
362 case RTE_TABLEFUNC:
363 case RTE_VALUES:
364 case RTE_CTE:
365 case RTE_NAMEDTUPLESTORE:
366
367 /*
368 * Subquery, function, tablefunc, values list, CTE, or ENR --- set
369 * up attr range and arrays
370 *
371 * Note: 0 is included in range to support whole-row Vars
372 */
373 rel->min_attr = 0;
374 rel->max_attr = list_length(rte->eref->colnames);
375 rel->attr_needed = (Relids *)
376 palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
377 rel->attr_widths = (int32 *)
378 palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
379 break;
380 case RTE_RESULT:
381 /* RTE_RESULT has no columns, nor could it have whole-row Var */
382 rel->min_attr = 0;
383 rel->max_attr = -1;
384 rel->attr_needed = NULL;
385 rel->attr_widths = NULL;
386 break;
387 default:
388 elog(ERROR, "unrecognized RTE kind: %d",
389 (int) rte->rtekind);
390 break;
391 }
392
393 /*
394 * We must apply the partially filled in RelOptInfo before calling
395 * apply_child_basequals due to some transformations within that function
396 * which require the RelOptInfo to be available in the simple_rel_array.
397 */
398 root->simple_rel_array[relid] = rel;
399
400 /*
401 * Apply the parent's quals to the child, with appropriate substitution of
402 * variables. If the resulting clause is constant-FALSE or NULL after
403 * applying transformations, apply_child_basequals returns false to
404 * indicate that scanning this relation won't yield any rows. In this
405 * case, we mark the child as dummy right away. (We must do this
406 * immediately so that pruning works correctly when recursing in
407 * expand_partitioned_rtentry.)
408 */
409 if (parent)
410 {
411 AppendRelInfo *appinfo = root->append_rel_array[relid];
412
413 Assert(appinfo != NULL);
414 if (!apply_child_basequals(root, parent, rel, rte, appinfo))
415 {
416 /*
417 * Restriction clause reduced to constant FALSE or NULL. Mark as
418 * dummy so we won't scan this relation.
419 */
420 mark_dummy_rel(rel);
421 }
422 }
423
424 return rel;
425}
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
int32_t int32
Definition: c.h:539
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
bool apply_child_basequals(PlannerInfo *root, RelOptInfo *parentrel, RelOptInfo *childrel, RangeTblEntry *childRTE, AppendRelInfo *appinfo)
Definition: inherit.c:838
void mark_dummy_rel(RelOptInfo *rel)
Definition: joinrels.c:1513
void * palloc0(Size size)
Definition: mcxt.c:1395
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
@ RTE_CTE
Definition: parsenodes.h:1049
@ RTE_NAMEDTUPLESTORE
Definition: parsenodes.h:1050
@ RTE_VALUES
Definition: parsenodes.h:1048
@ RTE_SUBQUERY
Definition: parsenodes.h:1044
@ RTE_RESULT
Definition: parsenodes.h:1051
@ RTE_FUNCTION
Definition: parsenodes.h:1046
@ RTE_TABLEFUNC
Definition: parsenodes.h:1047
@ RTE_RELATION
Definition: parsenodes.h:1043
@ RELOPT_BASEREL
Definition: pathnodes.h:883
@ RELOPT_OTHER_MEMBER_REL
Definition: pathnodes.h:885
static int list_length(const List *l)
Definition: pg_list.h:152
void get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, RelOptInfo *rel)
Definition: plancat.c:124
RTEKind rtekind
Definition: parsenodes.h:1078

References RelOptInfo::agg_info, RelOptInfo::all_partrels, RelOptInfo::allvisfrac, RelOptInfo::amflags, apply_child_basequals(), Assert(), RelOptInfo::baserestrict_min_security, RelOptInfo::baserestrictcost, RelOptInfo::baserestrictinfo, bms_make_singleton(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RTEPermissionInfo::checkAsUser, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_partitionwise_join, RelOptInfo::consider_startup, create_empty_pathtarget(), RelOptInfo::direct_lateral_relids, RelOptInfo::eclass_indexes, elog, ERROR, get_relation_info(), getRTEPermissionInfo(), RelOptInfo::grouped_rel, RelOptInfo::has_eclass_joins, RelOptInfo::indexlist, RangeTblEntry::inh, InvalidOid, RelOptInfo::joininfo, RelOptInfo::lateral_referencers, RelOptInfo::lateral_relids, RelOptInfo::lateral_vars, list_length(), RelOptInfo::live_parts, makeNode, mark_dummy_rel(), RelOptInfo::max_attr, RelOptInfo::min_attr, NIL, RelOptInfo::non_unique_for_rels, RelOptInfo::notnullattnums, RelOptInfo::nparts, RelOptInfo::nulling_relids, RelOptInfo::pages, palloc0(), RelOptInfo::partbounds_merged, RelOptInfo::partial_pathlist, RelOptInfo::partition_qual, RelOptInfo::pathlist, QualCost::per_tuple, RelOptInfo::ppilist, RelOptInfo::rel_parallel_workers, RelOptInfo::relid, RelOptInfo::relids, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, RelOptInfo::rows, RTE_CTE, RTE_FUNCTION, RTE_NAMEDTUPLESTORE, RTE_RELATION, RTE_RESULT, RTE_SUBQUERY, RTE_TABLEFUNC, RTE_VALUES, RangeTblEntry::rtekind, RelOptInfo::rtekind, RelOptInfo::serverid, QualCost::startup, RelOptInfo::statlist, RelOptInfo::subplan_params, RelOptInfo::subroot, RelOptInfo::top_parent_relids, RelOptInfo::tuples, RelOptInfo::unique_for_rels, RelOptInfo::unique_groupclause, RelOptInfo::unique_pathkeys, RelOptInfo::unique_rel, RelOptInfo::userid, and RelOptInfo::useridiscurrent.

Referenced by add_base_rels_to_query(), expand_appendrel_subquery(), expand_inherited_rtentry(), expand_partitioned_rtentry(), plan_cluster_use_sort(), plan_create_index_workers(), query_planner(), and recurse_set_operations().
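
For orientation, a sketch (not the actual PostgreSQL code) of how a jointree walker such as add_base_rels_to_query() uses this function: each RangeTblRef leaf gets a baserel RelOptInfo, with parent passed as NULL because the rel is not an inheritance child. The helper name is hypothetical.

#include "postgres.h"
#include "nodes/primnodes.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: build the RelOptInfo for one jointree leaf. */
static void
build_rel_for_rangetblref(PlannerInfo *root, Node *jtnode)
{
    if (IsA(jtnode, RangeTblRef))
    {
        int varno = ((RangeTblRef *) jtnode)->rtindex;

        (void) build_simple_rel(root, varno, NULL); /* no parent rel */
    }
}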

◆ calc_nestloop_required_outer()

Relids calc_nestloop_required_outer ( Relids  outerrelids,
Relids  outer_paramrels,
Relids  innerrelids,
Relids  inner_paramrels 
)

Definition at line 2214 of file pathnode.c.

2218{
2219 Relids required_outer;
2220
2221 /* inner_path can require rels from outer path, but not vice versa */
2222 Assert(!bms_overlap(outer_paramrels, innerrelids));
2223 /* easy case if inner path is not parameterized */
2224 if (!inner_paramrels)
2225 return bms_copy(outer_paramrels);
2226 /* else, form the union ... */
2227 required_outer = bms_union(outer_paramrels, inner_paramrels);
2228 /* ... and remove any mention of now-satisfied outer rels */
2229 required_outer = bms_del_members(required_outer,
2230 outerrelids);
2231 return required_outer;
2232}
bool bms_overlap(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:582

References Assert(), bms_copy(), bms_del_members(), bms_overlap(), and bms_union().

Referenced by try_nestloop_path().
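
A minimal sketch of the call pattern in try_nestloop_path(), simplified: the real caller substitutes top_parent_relids when the inputs are partitionwise-join children. The wrapper name is hypothetical.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: parameterization a nestloop of these inputs needs. */
static Relids
nestloop_required_outer(Path *outer_path, Path *inner_path)
{
    return calc_nestloop_required_outer(outer_path->parent->relids,
                                        PATH_REQ_OUTER(outer_path),
                                        inner_path->parent->relids,
                                        PATH_REQ_OUTER(inner_path));
}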

◆ calc_non_nestloop_required_outer()

Relids calc_non_nestloop_required_outer ( Path *outer_path,
Path *inner_path 
)

Definition at line 2241 of file pathnode.c.

2242{
2243 Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2244 Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2245 Relids innerrelids PG_USED_FOR_ASSERTS_ONLY;
2246 Relids outerrelids PG_USED_FOR_ASSERTS_ONLY;
2247 Relids required_outer;
2248
2249 /*
2250 * Any parameterization of the input paths refers to topmost parents of
2251 * the relevant relations, because reparameterize_path_by_child() hasn't
2252 * been called yet. So we must consider topmost parents of the relations
2253 * being joined, too, while checking for disallowed parameterization
2254 * cases.
2255 */
2256 if (inner_path->parent->top_parent_relids)
2257 innerrelids = inner_path->parent->top_parent_relids;
2258 else
2259 innerrelids = inner_path->parent->relids;
2260
2261 if (outer_path->parent->top_parent_relids)
2262 outerrelids = outer_path->parent->top_parent_relids;
2263 else
2264 outerrelids = outer_path->parent->relids;
2265
2266 /* neither path can require rels from the other */
2267 Assert(!bms_overlap(outer_paramrels, innerrelids));
2268 Assert(!bms_overlap(inner_paramrels, outerrelids));
2269 /* form the union ... */
2270 required_outer = bms_union(outer_paramrels, inner_paramrels);
2271 /* we do not need an explicit test for empty; bms_union gets it right */
2272 return required_outer;
2273}
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:228

References Assert(), bms_overlap(), bms_union(), PATH_REQ_OUTER, and PG_USED_FOR_ASSERTS_ONLY.

Referenced by try_hashjoin_path(), and try_mergejoin_path().
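
And the corresponding sketch for hash and merge joins, where the inputs cannot be parameterized by each other, so the result is simply the union of what they already require (NULL when neither input is parameterized). The wrapper name is hypothetical; the param_source_rels filtering done by the real callers is omitted.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: outer rels a hash or merge join would depend on. */
static Relids
hash_or_merge_required_outer(Path *outer_path, Path *inner_path)
{
    return calc_non_nestloop_required_outer(outer_path, inner_path);
}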

◆ compare_fractional_path_costs()

int compare_fractional_path_costs ( Path *path1,
Path *path2,
double  fraction 
)

Definition at line 125 of file pathnode.c.

127{
128 Cost cost1,
129 cost2;
130
131 /* Number of disabled nodes, if different, trumps all else. */
132 if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
133 {
134 if (path1->disabled_nodes < path2->disabled_nodes)
135 return -1;
136 else
137 return +1;
138 }
139
140 if (fraction <= 0.0 || fraction >= 1.0)
141 return compare_path_costs(path1, path2, TOTAL_COST);
142 cost1 = path1->startup_cost +
143 fraction * (path1->total_cost - path1->startup_cost);
144 cost2 = path2->startup_cost +
145 fraction * (path2->total_cost - path2->startup_cost);
146 if (cost1 < cost2)
147 return -1;
148 if (cost1 > cost2)
149 return +1;
150 return 0;
151}
int compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
Definition: pathnode.c:70
@ TOTAL_COST
Definition: pathnodes.h:38

References compare_path_costs(), Path::disabled_nodes, Path::startup_cost, TOTAL_COST, Path::total_cost, and unlikely.

Referenced by get_cheapest_fractional_path(), and get_cheapest_fractional_path_for_pathkeys().
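
A sketch of the typical selection loop; get_cheapest_fractional_path() is the real, more complete API, while the helper below is hypothetical and ignores parameterized paths. It picks the path expected to be cheapest when only a fraction of the rows will actually be fetched.

#include "postgres.h"
#include "nodes/pg_list.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: cheapest path for retrieving 'fraction' of the rows. */
static Path *
cheapest_fractional(RelOptInfo *rel, double fraction)
{
    Path       *best = NULL;
    ListCell   *lc;

    foreach(lc, rel->pathlist)
    {
        Path *path = (Path *) lfirst(lc);

        if (best == NULL ||
            compare_fractional_path_costs(path, best, fraction) < 0)
            best = path;
    }
    return best;
}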

◆ compare_path_costs()

int compare_path_costs ( Path *path1,
Path *path2,
CostSelector  criterion 
)

Definition at line 70 of file pathnode.c.

71{
72 /* Number of disabled nodes, if different, trumps all else. */
73 if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
74 {
75 if (path1->disabled_nodes < path2->disabled_nodes)
76 return -1;
77 else
78 return +1;
79 }
80
81 if (criterion == STARTUP_COST)
82 {
83 if (path1->startup_cost < path2->startup_cost)
84 return -1;
85 if (path1->startup_cost > path2->startup_cost)
86 return +1;
87
88 /*
89 * If paths have the same startup cost (not at all unlikely), order
90 * them by total cost.
91 */
92 if (path1->total_cost < path2->total_cost)
93 return -1;
94 if (path1->total_cost > path2->total_cost)
95 return +1;
96 }
97 else
98 {
99 if (path1->total_cost < path2->total_cost)
100 return -1;
101 if (path1->total_cost > path2->total_cost)
102 return +1;
103
104 /*
105 * If paths have the same total cost, order them by startup cost.
106 */
107 if (path1->startup_cost < path2->startup_cost)
108 return -1;
109 if (path1->startup_cost > path2->startup_cost)
110 return +1;
111 }
112 return 0;
113}
@ STARTUP_COST
Definition: pathnodes.h:38

References Path::disabled_nodes, STARTUP_COST, Path::startup_cost, Path::total_cost, and unlikely.

Referenced by append_startup_cost_compare(), append_total_cost_compare(), compare_fractional_path_costs(), generate_mergejoin_paths(), get_cheapest_parameterized_child_path(), get_cheapest_path_for_pathkeys(), and set_cheapest().
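
A trivial sketch of how callers use the comparator (the helper name is hypothetical): keep whichever of two candidate paths wins on the chosen criterion, remembering that a difference in disabled-node counts overrides the costs.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: the cheaper of two paths by total cost. */
static Path *
cheaper_total_path(Path *a, Path *b)
{
    return (compare_path_costs(a, b, TOTAL_COST) <= 0) ? a : b;
}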

◆ create_agg_path()

AggPath * create_agg_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
AggStrategy  aggstrategy,
AggSplit  aggsplit,
List *groupClause,
List *qual,
const AggClauseCosts *aggcosts,
double  numGroups 
)

Definition at line 2994 of file pathnode.c.

3004{
3005 AggPath *pathnode = makeNode(AggPath);
3006
3007 pathnode->path.pathtype = T_Agg;
3008 pathnode->path.parent = rel;
3009 pathnode->path.pathtarget = target;
3010 pathnode->path.param_info = subpath->param_info;
3011 pathnode->path.parallel_aware = false;
3012 pathnode->path.parallel_safe = rel->consider_parallel &&
3013 subpath->parallel_safe;
3014 pathnode->path.parallel_workers = subpath->parallel_workers;
3015
3016 if (aggstrategy == AGG_SORTED)
3017 {
3018 /*
3019 * Attempt to preserve the order of the subpath. Additional pathkeys
3020 * may have been added in adjust_group_pathkeys_for_groupagg() to
3021 * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3022 * belong to columns within the aggregate function, so we must strip
3023 * these additional pathkeys off as those columns are unavailable
3024 * above the aggregate node.
3025 */
3026 if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3027 pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3028 root->num_groupby_pathkeys);
3029 else
3030 pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3031 }
3032 else
3033 pathnode->path.pathkeys = NIL; /* output is unordered */
3034
3035 pathnode->subpath = subpath;
3036
3037 pathnode->aggstrategy = aggstrategy;
3038 pathnode->aggsplit = aggsplit;
3039 pathnode->numGroups = numGroups;
3040 pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3041 pathnode->groupClause = groupClause;
3042 pathnode->qual = qual;
3043
3044 cost_agg(&pathnode->path, root,
3045 aggstrategy, aggcosts,
3046 list_length(groupClause), numGroups,
3047 qual,
3048 subpath->disabled_nodes,
3049 subpath->startup_cost, subpath->total_cost,
3050 subpath->rows, subpath->pathtarget->width);
3051
3052 /* add tlist eval cost for each output row */
3053 pathnode->path.startup_cost += target->cost.startup;
3054 pathnode->path.total_cost += target->cost.startup +
3055 target->cost.per_tuple * pathnode->path.rows;
3056
3057 return pathnode;
3058}
void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, List *quals, int disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, double input_width)
Definition: costsize.c:2688
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:311
@ AGG_SORTED
Definition: nodes.h:365
Size transitionSpace
Definition: pathnodes.h:62
Path * subpath
Definition: pathnodes.h:2484
Cardinality numGroups
Definition: pathnodes.h:2487
AggSplit aggsplit
Definition: pathnodes.h:2486
List * groupClause
Definition: pathnodes.h:2489
uint64 transitionSpace
Definition: pathnodes.h:2488
AggStrategy aggstrategy
Definition: pathnodes.h:2485
Path path
Definition: pathnodes.h:2483
List * qual
Definition: pathnodes.h:2490
NodeTag pathtype
Definition: pathnodes.h:1873
int parallel_workers
Definition: pathnodes.h:1904
bool parallel_aware
Definition: pathnodes.h:1900

References AGG_SORTED, AggPath::aggsplit, AggPath::aggstrategy, RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), AggPath::groupClause, list_copy_head(), list_length(), makeNode, NIL, AggPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, AggPath::qual, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), AggPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, and AggPath::transitionSpace.

Referenced by add_paths_to_grouping_rel(), create_final_distinct_paths(), create_final_unique_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_partial_unique_paths(), generate_grouped_paths(), and generate_union_paths().
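
A sketch loosely modelled on what add_paths_to_grouping_rel() does for hash aggregation. The helper and all of its parameters are assumptions supplied by the caller: the grouping target, group clause, HAVING qual, precomputed AggClauseCosts, and a group-count estimate.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: add one hashed-aggregate path to grouped_rel. */
static void
add_hashagg_path(PlannerInfo *root, RelOptInfo *grouped_rel,
                 Path *input_path, PathTarget *grouping_target,
                 List *group_clause, List *having_qual,
                 const AggClauseCosts *agg_costs, double num_groups)
{
    add_path(grouped_rel, (Path *)
             create_agg_path(root, grouped_rel, input_path,
                             grouping_target,
                             AGG_HASHED,        /* no input ordering needed */
                             AGGSPLIT_SIMPLE,   /* not a partial aggregate */
                             group_clause, having_qual,
                             agg_costs, num_groups));
}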

◆ create_append_path()

AppendPath * create_append_path ( PlannerInfo *root,
RelOptInfo *rel,
List *subpaths,
List *partial_subpaths,
List *pathkeys,
Relids  required_outer,
int  parallel_workers,
bool  parallel_aware,
double  rows 
)

Definition at line 1300 of file pathnode.c.

1306{
1307 AppendPath *pathnode = makeNode(AppendPath);
1308 ListCell *l;
1309
1310 Assert(!parallel_aware || parallel_workers > 0);
1311
1312 pathnode->path.pathtype = T_Append;
1313 pathnode->path.parent = rel;
1314 pathnode->path.pathtarget = rel->reltarget;
1315
1316 /*
1317 * If this is for a baserel (not a join or non-leaf partition), we prefer
1318 * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1319 * for the path. This supports building a Memoize path atop this path,
1320 * and if this is a partitioned table the info may be useful for run-time
1321 * pruning (cf make_partition_pruneinfo()).
1322 *
1323 * However, if we don't have "root" then that won't work and we fall back
1324 * on the simpler get_appendrel_parampathinfo. There's no point in doing
1325 * the more expensive thing for a dummy path, either.
1326 */
1327 if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1328 pathnode->path.param_info = get_baserel_parampathinfo(root,
1329 rel,
1330 required_outer);
1331 else
1332 pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1333 required_outer);
1334
1335 pathnode->path.parallel_aware = parallel_aware;
1336 pathnode->path.parallel_safe = rel->consider_parallel;
1337 pathnode->path.parallel_workers = parallel_workers;
1338 pathnode->path.pathkeys = pathkeys;
1339
1340 /*
1341 * For parallel append, non-partial paths are sorted by descending total
1342 * costs. That way, the total time to finish all non-partial paths is
1343 * minimized. Also, the partial paths are sorted by descending startup
1344 * costs. There may be some paths that require to do startup work by a
1345 * single worker. In such case, it's better for workers to choose the
1346 * expensive ones first, whereas the leader should choose the cheapest
1347 * startup plan.
1348 */
1349 if (pathnode->path.parallel_aware)
1350 {
1351 /*
1352 * We mustn't fiddle with the order of subpaths when the Append has
1353 * pathkeys. The order they're listed in is critical to keeping the
1354 * pathkeys valid.
1355 */
1356 Assert(pathkeys == NIL);
1357
1358 list_sort(subpaths, append_total_cost_compare);
1359 list_sort(partial_subpaths, append_startup_cost_compare);
1360 }
1361 pathnode->first_partial_path = list_length(subpaths);
1362 pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1363
1364 /*
1365 * Apply query-wide LIMIT if known and path is for sole base relation.
1366 * (Handling this at this low level is a bit klugy.)
1367 */
1368 if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1369 pathnode->limit_tuples = root->limit_tuples;
1370 else
1371 pathnode->limit_tuples = -1.0;
1372
1373 foreach(l, pathnode->subpaths)
1374 {
1375 Path *subpath = (Path *) lfirst(l);
1376
1377 pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1378 subpath->parallel_safe;
1379
1380 /* All child paths must have same parameterization */
1381 Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1382 }
1383
1384 Assert(!parallel_aware || pathnode->path.parallel_safe);
1385
1386 /*
1387 * If there's exactly one child path then the output of the Append is
1388 * necessarily ordered the same as the child's, so we can inherit the
1389 * child's pathkeys if any, overriding whatever the caller might've said.
1390 * Furthermore, if the child's parallel awareness matches the Append's,
1391 * then the Append is a no-op and will be discarded later (in setrefs.c).
1392 * Then we can inherit the child's size and cost too, effectively charging
1393 * zero for the Append. Otherwise, we must do the normal costsize
1394 * calculation.
1395 */
1396 if (list_length(pathnode->subpaths) == 1)
1397 {
1398 Path *child = (Path *) linitial(pathnode->subpaths);
1399
1400 if (child->parallel_aware == parallel_aware)
1401 {
1402 pathnode->path.rows = child->rows;
1403 pathnode->path.startup_cost = child->startup_cost;
1404 pathnode->path.total_cost = child->total_cost;
1405 }
1406 else
1407 cost_append(pathnode, root);
1408 /* Must do this last, else cost_append complains */
1409 pathnode->path.pathkeys = child->pathkeys;
1410 }
1411 else
1412 cost_append(pathnode, root);
1413
1414 /* If the caller provided a row estimate, override the computed value. */
1415 if (rows >= 0)
1416 pathnode->path.rows = rows;
1417
1418 return pathnode;
1419}
void cost_append(AppendPath *apath, PlannerInfo *root)
Definition: costsize.c:2224
void list_sort(List *list, list_sort_comparator cmp)
Definition: list.c:1674
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
static int append_startup_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1453
static int append_total_cost_compare(const ListCell *a, const ListCell *b)
Definition: pathnode.c:1431
#define linitial(l)
Definition: pg_list.h:178
ParamPathInfo * get_appendrel_parampathinfo(RelOptInfo *appendrel, Relids required_outer)
Definition: relnode.c:1978
ParamPathInfo * get_baserel_parampathinfo(PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
Definition: relnode.c:1667
int first_partial_path
Definition: pathnodes.h:2182
Cardinality limit_tuples
Definition: pathnodes.h:2183
List * subpaths
Definition: pathnodes.h:2180

References append_startup_cost_compare(), append_total_cost_compare(), Assert(), bms_equal(), RelOptInfo::consider_parallel, cost_append(), AppendPath::first_partial_path, get_appendrel_parampathinfo(), get_baserel_parampathinfo(), lfirst, AppendPath::limit_tuples, linitial, list_concat(), list_length(), list_sort(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::relids, RELOPT_BASEREL, RelOptInfo::reloptkind, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), AppendPath::subpaths, and Path::total_cost.

Referenced by add_paths_to_append_rel(), create_degenerate_grouping_paths(), generate_nonunion_paths(), generate_orderedappend_paths(), generate_union_paths(), mark_dummy_rel(), reparameterize_path(), and set_dummy_rel_pathlist().
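
A sketch in the spirit of the non-parallel part of add_paths_to_append_rel(); the helper is hypothetical, childrels is assumed to be a List of live child RelOptInfo pointers, and set_cheapest() is assumed to have run for each child already.

#include "postgres.h"
#include "nodes/pg_list.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: one plain Append over each child's cheapest path. */
static void
add_plain_append(PlannerInfo *root, RelOptInfo *parent_rel, List *childrels)
{
    List     *subpaths = NIL;
    ListCell *lc;

    foreach(lc, childrels)
    {
        RelOptInfo *childrel = (RelOptInfo *) lfirst(lc);

        subpaths = lappend(subpaths, childrel->cheapest_total_path);
    }
    add_path(parent_rel, (Path *)
             create_append_path(root, parent_rel,
                                subpaths, NIL,   /* no partial subpaths */
                                NIL,             /* unordered */
                                NULL,            /* not parameterized */
                                0, false,        /* not parallel */
                                -1));            /* let costing set rows */
}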

◆ create_bitmap_and_path()

BitmapAndPath * create_bitmap_and_path ( PlannerInfo *root,
RelOptInfo *rel,
List *bitmapquals 
)

Definition at line 1131 of file pathnode.c.

1134{
1135 BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1136 Relids required_outer = NULL;
1137 ListCell *lc;
1138
1139 pathnode->path.pathtype = T_BitmapAnd;
1140 pathnode->path.parent = rel;
1141 pathnode->path.pathtarget = rel->reltarget;
1142
1143 /*
1144 * Identify the required outer rels as the union of what the child paths
1145 * depend on. (Alternatively, we could insist that the caller pass this
1146 * in, but it's more convenient and reliable to compute it here.)
1147 */
1148 foreach(lc, bitmapquals)
1149 {
1150 Path *bitmapqual = (Path *) lfirst(lc);
1151
1152 required_outer = bms_add_members(required_outer,
1153 PATH_REQ_OUTER(bitmapqual));
1154 }
1155 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1156 required_outer);
1157
1158 /*
1159 * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1160 * parallel-safe if and only if rel->consider_parallel is set. So, we can
1161 * set the flag for this path based only on the relation-level flag,
1162 * without actually iterating over the list of children.
1163 */
1164 pathnode->path.parallel_aware = false;
1165 pathnode->path.parallel_safe = rel->consider_parallel;
1166 pathnode->path.parallel_workers = 0;
1167
1168 pathnode->path.pathkeys = NIL; /* always unordered */
1169
1170 pathnode->bitmapquals = bitmapquals;
1171
1172 /* this sets bitmapselectivity as well as the regular cost fields: */
1173 cost_bitmap_and_node(pathnode, root);
1174
1175 return pathnode;
1176}
Bitmapset * bms_add_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:917
void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
Definition: costsize.c:1139
List * bitmapquals
Definition: pathnodes.h:2045

References BitmapAndPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_and_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapAndPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by bitmap_and_cost_est(), and choose_bitmap_and().

◆ create_bitmap_heap_path()

BitmapHeapPath * create_bitmap_heap_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *bitmapqual,
Relids  required_outer,
double  loop_count,
int  parallel_degree 
)

Definition at line 1098 of file pathnode.c.

1104{
1105 BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1106
1107 pathnode->path.pathtype = T_BitmapHeapScan;
1108 pathnode->path.parent = rel;
1109 pathnode->path.pathtarget = rel->reltarget;
1110 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1111 required_outer);
1112 pathnode->path.parallel_aware = (parallel_degree > 0);
1113 pathnode->path.parallel_safe = rel->consider_parallel;
1114 pathnode->path.parallel_workers = parallel_degree;
1115 pathnode->path.pathkeys = NIL; /* always unordered */
1116
1117 pathnode->bitmapqual = bitmapqual;
1118
1119 cost_bitmap_heap_scan(&pathnode->path, root, rel,
1120 pathnode->path.param_info,
1121 bitmapqual, loop_count);
1122
1123 return pathnode;
1124}
void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, Path *bitmapqual, double loop_count)
Definition: costsize.c:997
Path * bitmapqual
Definition: pathnodes.h:2033

References BitmapHeapPath::bitmapqual, RelOptInfo::consider_parallel, cost_bitmap_heap_scan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapHeapPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_index_paths(), create_partial_bitmap_paths(), and reparameterize_path().
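
A sketch combining this constructor with create_bitmap_and_path(), roughly what create_index_paths() ends up doing after choose_bitmap_and(). The helper is hypothetical; ipath1, ipath2 and loop_count are assumptions supplied by the caller, and the index paths are assumed to need no outer rels beyond the relation's own lateral references.

#include "postgres.h"
#include "nodes/pg_list.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: AND two bitmap inputs, then fetch matching heap rows. */
static void
add_bitmap_and_heap_path(PlannerInfo *root, RelOptInfo *rel,
                         Path *ipath1, Path *ipath2, double loop_count)
{
    BitmapAndPath *andpath;

    andpath = create_bitmap_and_path(root, rel, list_make2(ipath1, ipath2));
    add_path(rel, (Path *)
             create_bitmap_heap_path(root, rel, (Path *) andpath,
                                     rel->lateral_relids, /* required outer */
                                     loop_count,
                                     0));                 /* not parallel */
}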

◆ create_bitmap_or_path()

BitmapOrPath * create_bitmap_or_path ( PlannerInfo *root,
RelOptInfo *rel,
List *bitmapquals 
)

Definition at line 1183 of file pathnode.c.

1186{
1187 BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1188 Relids required_outer = NULL;
1189 ListCell *lc;
1190
1191 pathnode->path.pathtype = T_BitmapOr;
1192 pathnode->path.parent = rel;
1193 pathnode->path.pathtarget = rel->reltarget;
1194
1195 /*
1196 * Identify the required outer rels as the union of what the child paths
1197 * depend on. (Alternatively, we could insist that the caller pass this
1198 * in, but it's more convenient and reliable to compute it here.)
1199 */
1200 foreach(lc, bitmapquals)
1201 {
1202 Path *bitmapqual = (Path *) lfirst(lc);
1203
1204 required_outer = bms_add_members(required_outer,
1205 PATH_REQ_OUTER(bitmapqual));
1206 }
1207 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1208 required_outer);
1209
1210 /*
1211 * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1212 * parallel-safe if and only if rel->consider_parallel is set. So, we can
1213 * set the flag for this path based only on the relation-level flag,
1214 * without actually iterating over the list of children.
1215 */
1216 pathnode->path.parallel_aware = false;
1217 pathnode->path.parallel_safe = rel->consider_parallel;
1218 pathnode->path.parallel_workers = 0;
1219
1220 pathnode->path.pathkeys = NIL; /* always unordered */
1221
1222 pathnode->bitmapquals = bitmapquals;
1223
1224 /* this sets bitmapselectivity as well as the regular cost fields: */
1225 cost_bitmap_or_node(pathnode, root);
1226
1227 return pathnode;
1228}
void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
Definition: costsize.c:1184
List * bitmapquals
Definition: pathnodes.h:2058

References BitmapOrPath::bitmapquals, bms_add_members(), RelOptInfo::consider_parallel, cost_bitmap_or_node(), get_baserel_parampathinfo(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, BitmapOrPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by generate_bitmap_or_paths().

◆ create_ctescan_path()

Path * create_ctescan_path ( PlannerInfo *root,
RelOptInfo *rel,
List *pathkeys,
Relids  required_outer 
)

Definition at line 1954 of file pathnode.c.

1956{
1957 Path *pathnode = makeNode(Path);
1958
1959 pathnode->pathtype = T_CteScan;
1960 pathnode->parent = rel;
1961 pathnode->pathtarget = rel->reltarget;
1962 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1963 required_outer);
1964 pathnode->parallel_aware = false;
1965 pathnode->parallel_safe = rel->consider_parallel;
1966 pathnode->parallel_workers = 0;
1967 pathnode->pathkeys = pathkeys;
1968
1969 cost_ctescan(pathnode, root, rel, pathnode->param_info);
1970
1971 return pathnode;
1972}
void cost_ctescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1682

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_cte_pathlist().
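
A sketch in the spirit of set_cte_pathlist() (the helper name is hypothetical): give a CTE reference a single CteScan path, here unordered and parameterized only by the rel's lateral references.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: one unordered CteScan path for this rel. */
static void
add_cte_path(PlannerInfo *root, RelOptInfo *rel)
{
    add_path(rel, create_ctescan_path(root, rel,
                                      NIL,                 /* no pathkeys */
                                      rel->lateral_relids));
}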

◆ create_foreign_join_path()

ForeignPath * create_foreign_join_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Relids  required_outer,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2113 of file pathnode.c.

2122{
2123 ForeignPath *pathnode = makeNode(ForeignPath);
2124
2125 /*
2126 * We should use get_joinrel_parampathinfo to handle parameterized paths,
2127 * but the API of this function doesn't support it, and existing
2128 * extensions aren't yet trying to build such paths anyway. For the
2129 * moment just throw an error if someone tries it; eventually we should
2130 * revisit this.
2131 */
2132 if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2133 elog(ERROR, "parameterized foreign joins are not supported yet");
2134
2135 pathnode->path.pathtype = T_ForeignScan;
2136 pathnode->path.parent = rel;
2137 pathnode->path.pathtarget = target ? target : rel->reltarget;
2138 pathnode->path.param_info = NULL; /* XXX see above */
2139 pathnode->path.parallel_aware = false;
2140 pathnode->path.parallel_safe = rel->consider_parallel;
2141 pathnode->path.parallel_workers = 0;
2142 pathnode->path.rows = rows;
2143 pathnode->path.disabled_nodes = disabled_nodes;
2144 pathnode->path.startup_cost = startup_cost;
2145 pathnode->path.total_cost = total_cost;
2146 pathnode->path.pathkeys = pathkeys;
2147
2148 pathnode->fdw_outerpath = fdw_outerpath;
2149 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2150 pathnode->fdw_private = fdw_private;
2151
2152 return pathnode;
2153}
#define bms_is_empty(a)
Definition: bitmapset.h:118
Path * fdw_outerpath
Definition: pathnodes.h:2118
List * fdw_restrictinfo
Definition: pathnodes.h:2119
List * fdw_private
Definition: pathnodes.h:2120

References bms_is_empty, RelOptInfo::consider_parallel, Path::disabled_nodes, elog, ERROR, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), and postgresGetForeignJoinPaths().
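
A sketch of a hypothetical FDW's GetForeignJoinPaths callback: push the whole join down as a single ForeignScan, provided it needs no parameterization. The helper name and the rows/cost arguments are assumptions; a real FDW would estimate them itself.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: one pushed-down foreign join path. */
static void
add_pushed_down_join_path(PlannerInfo *root, RelOptInfo *joinrel,
                          double est_rows, Cost startup, Cost total)
{
    if (joinrel->lateral_relids != NULL)
        return;                 /* parameterized foreign joins unsupported */

    add_path(joinrel, (Path *)
             create_foreign_join_path(root, joinrel,
                                      NULL,       /* use joinrel->reltarget */
                                      est_rows,
                                      0,          /* disabled_nodes */
                                      startup, total,
                                      NIL,        /* no pathkeys */
                                      NULL,       /* no required outer rels */
                                      NULL,       /* no fdw_outerpath */
                                      NIL,        /* no fdw_restrictinfo */
                                      NIL));      /* no fdw_private */
}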

◆ create_foreign_upper_path()

ForeignPath * create_foreign_upper_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2167 of file pathnode.c.

2175{
2176 ForeignPath *pathnode = makeNode(ForeignPath);
2177
2178 /*
2179 * Upper relations should never have any lateral references, since joining
2180 * is complete.
2181 */
2182 Assert(bms_is_empty(rel->lateral_relids));
2183
2184 pathnode->path.pathtype = T_ForeignScan;
2185 pathnode->path.parent = rel;
2186 pathnode->path.pathtarget = target ? target : rel->reltarget;
2187 pathnode->path.param_info = NULL;
2188 pathnode->path.parallel_aware = false;
2189 pathnode->path.parallel_safe = rel->consider_parallel;
2190 pathnode->path.parallel_workers = 0;
2191 pathnode->path.rows = rows;
2192 pathnode->path.disabled_nodes = disabled_nodes;
2193 pathnode->path.startup_cost = startup_cost;
2194 pathnode->path.total_cost = total_cost;
2195 pathnode->path.pathkeys = pathkeys;
2196
2197 pathnode->fdw_outerpath = fdw_outerpath;
2198 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2199 pathnode->fdw_private = fdw_private;
2200
2201 return pathnode;
2202}

References Assert(), bms_is_empty, RelOptInfo::consider_parallel, Path::disabled_nodes, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, RelOptInfo::lateral_relids, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_foreign_final_paths(), add_foreign_grouping_paths(), and add_foreign_ordered_paths().

◆ create_foreignscan_path()

ForeignPath * create_foreignscan_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
double  rows,
int  disabled_nodes,
Cost  startup_cost,
Cost  total_cost,
List *pathkeys,
Relids  required_outer,
Path *fdw_outerpath,
List *fdw_restrictinfo,
List *fdw_private 
)

Definition at line 2065 of file pathnode.c.

2074{
2075 ForeignPath *pathnode = makeNode(ForeignPath);
2076
2077 /* Historically some FDWs were confused about when to use this */
2078 Assert(IS_SIMPLE_REL(rel));
2079
2080 pathnode->path.pathtype = T_ForeignScan;
2081 pathnode->path.parent = rel;
2082 pathnode->path.pathtarget = target ? target : rel->reltarget;
2083 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2084 required_outer);
2085 pathnode->path.parallel_aware = false;
2086 pathnode->path.parallel_safe = rel->consider_parallel;
2087 pathnode->path.parallel_workers = 0;
2088 pathnode->path.rows = rows;
2089 pathnode->path.disabled_nodes = disabled_nodes;
2090 pathnode->path.startup_cost = startup_cost;
2091 pathnode->path.total_cost = total_cost;
2092 pathnode->path.pathkeys = pathkeys;
2093
2094 pathnode->fdw_outerpath = fdw_outerpath;
2095 pathnode->fdw_restrictinfo = fdw_restrictinfo;
2096 pathnode->fdw_private = fdw_private;
2097
2098 return pathnode;
2099}
#define IS_SIMPLE_REL(rel)
Definition: pathnodes.h:895

References Assert(), RelOptInfo::consider_parallel, Path::disabled_nodes, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, get_baserel_parampathinfo(), IS_SIMPLE_REL, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ForeignPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), fileGetForeignPaths(), and postgresGetForeignPaths().
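
A sketch of a minimal GetForeignPaths callback for a hypothetical FDW: one unordered, unparameterized ForeignScan path. The callback name and the cost numbers are placeholders; a real FDW would derive its estimates from remote statistics.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical FDW callback: add a single default foreign scan path. */
static void
myGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid)
{
    Cost startup_cost = 10;                       /* placeholder estimate */
    Cost total_cost = startup_cost + baserel->rows;

    add_path(baserel, (Path *)
             create_foreignscan_path(root, baserel,
                                     NULL,        /* default pathtarget */
                                     baserel->rows,
                                     0,           /* no disabled nodes */
                                     startup_cost,
                                     total_cost,
                                     NIL,         /* no pathkeys */
                                     NULL,        /* no outer rels */
                                     NULL,        /* no fdw_outerpath */
                                     NIL,         /* no fdw_restrictinfo */
                                     NIL));       /* no fdw_private */
}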

◆ create_functionscan_path()

Path * create_functionscan_path ( PlannerInfo *root,
RelOptInfo *rel,
List *pathkeys,
Relids  required_outer 
)

Definition at line 1876 of file pathnode.c.

1878{
1879 Path *pathnode = makeNode(Path);
1880
1881 pathnode->pathtype = T_FunctionScan;
1882 pathnode->parent = rel;
1883 pathnode->pathtarget = rel->reltarget;
1884 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1885 required_outer);
1886 pathnode->parallel_aware = false;
1887 pathnode->parallel_safe = rel->consider_parallel;
1888 pathnode->parallel_workers = 0;
1889 pathnode->pathkeys = pathkeys;
1890
1891 cost_functionscan(pathnode, root, rel, pathnode->param_info);
1892
1893 return pathnode;
1894}
void cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1512

References RelOptInfo::consider_parallel, cost_functionscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_function_pathlist().
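
A sketch in the spirit of set_function_pathlist() (helper name hypothetical): one FunctionScan path, here with no known ordering, parameterized by the rel's lateral references.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: default path for a function RTE. */
static void
add_function_path(PlannerInfo *root, RelOptInfo *rel)
{
    add_path(rel, create_functionscan_path(root, rel,
                                           NIL,   /* no ordering assumed */
                                           rel->lateral_relids));
}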

◆ create_gather_merge_path()

GatherMergePath * create_gather_merge_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
List *pathkeys,
Relids  required_outer,
double *rows 
)

Definition at line 1750 of file pathnode.c.

1753{
1754 GatherMergePath *pathnode = makeNode(GatherMergePath);
1755 int input_disabled_nodes = 0;
1756 Cost input_startup_cost = 0;
1757 Cost input_total_cost = 0;
1758
1759 Assert(subpath->parallel_safe);
1760 Assert(pathkeys);
1761
1762 /*
1763 * The subpath should guarantee that it is adequately ordered either by
1764 * adding an explicit sort node or by using presorted input. We cannot
1765 * add an explicit Sort node for the subpath in createplan.c on additional
1766 * pathkeys, because we can't guarantee the sort would be safe. For
1767 * example, expressions may be volatile or otherwise parallel unsafe.
1768 */
1769 if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1770 elog(ERROR, "gather merge input not sufficiently sorted");
1771
1772 pathnode->path.pathtype = T_GatherMerge;
1773 pathnode->path.parent = rel;
1774 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1775 required_outer);
1776 pathnode->path.parallel_aware = false;
1777
1778 pathnode->subpath = subpath;
1779 pathnode->num_workers = subpath->parallel_workers;
1780 pathnode->path.pathkeys = pathkeys;
1781 pathnode->path.pathtarget = target ? target : rel->reltarget;
1782
1783 input_disabled_nodes += subpath->disabled_nodes;
1784 input_startup_cost += subpath->startup_cost;
1785 input_total_cost += subpath->total_cost;
1786
1787 cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1788 input_disabled_nodes, input_startup_cost,
1789 input_total_cost, rows);
1790
1791 return pathnode;
1792}
void cost_gather_merge(GatherMergePath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double *rows)
Definition: costsize.c:459
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343

References Assert(), cost_gather_merge(), elog, ERROR, get_baserel_parampathinfo(), makeNode, GatherMergePath::num_workers, Path::parallel_aware, GatherMergePath::path, Path::pathkeys, pathkeys_contained_in(), Path::pathtype, RelOptInfo::reltarget, root, subpath(), and GatherMergePath::subpath.

Referenced by create_ordered_paths(), gather_grouping_paths(), generate_gather_paths(), and generate_useful_gather_paths().
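
A sketch loosely after generate_useful_gather_paths(): when a partial path already delivers the wanted ordering, a Gather Merge preserves it while collecting worker output. The helper is hypothetical, and the row estimate is a crude placeholder (the planner uses compute_gather_rows()).

#include "postgres.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"

/* Hypothetical helper: add a Gather Merge over an already-sorted partial path. */
static void
maybe_add_gather_merge(PlannerInfo *root, RelOptInfo *rel,
                       Path *partial_path, List *useful_pathkeys)
{
    double rows = partial_path->rows * partial_path->parallel_workers;

    /* the subpath must already deliver the requested ordering */
    if (!pathkeys_contained_in(useful_pathkeys, partial_path->pathkeys))
        return;

    add_path(rel, (Path *)
             create_gather_merge_path(root, rel, partial_path,
                                      rel->reltarget,
                                      useful_pathkeys,
                                      NULL,      /* required_outer */
                                      &rows));
}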

◆ create_gather_path()

GatherPath * create_gather_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
PathTarget *target,
Relids  required_outer,
double *rows 
)

Definition at line 1802 of file pathnode.c.

1804{
1805 GatherPath *pathnode = makeNode(GatherPath);
1806
1807 Assert(subpath->parallel_safe);
1808
1809 pathnode->path.pathtype = T_Gather;
1810 pathnode->path.parent = rel;
1811 pathnode->path.pathtarget = target;
1812 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1813 required_outer);
1814 pathnode->path.parallel_aware = false;
1815 pathnode->path.parallel_safe = false;
1816 pathnode->path.parallel_workers = 0;
1817 pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1818
1819 pathnode->subpath = subpath;
1820 pathnode->num_workers = subpath->parallel_workers;
1821 pathnode->single_copy = false;
1822
1823 if (pathnode->num_workers == 0)
1824 {
1825 pathnode->path.pathkeys = subpath->pathkeys;
1826 pathnode->num_workers = 1;
1827 pathnode->single_copy = true;
1828 }
1829
1830 cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
1831
1832 return pathnode;
1833}
void cost_gather(GatherPath *path, PlannerInfo *root, RelOptInfo *rel, ParamPathInfo *param_info, double *rows)
Definition: costsize.c:420
bool single_copy
Definition: pathnodes.h:2265
int num_workers
Definition: pathnodes.h:2266

References Assert(), cost_gather(), get_baserel_parampathinfo(), makeNode, NIL, GatherPath::num_workers, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GatherPath::path, Path::pathkeys, Path::pathtype, root, GatherPath::single_copy, subpath(), and GatherPath::subpath.

Referenced by generate_gather_paths(), and generate_union_paths().
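
A sketch loosely after generate_gather_paths(): put a Gather on top of the cheapest partial path so the rest of the plan can run serially. The helper is hypothetical and the row estimate is a crude placeholder (the planner uses compute_gather_rows()).

#include "postgres.h"
#include "nodes/pg_list.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: one simple Gather over the cheapest partial path. */
static void
add_simple_gather(PlannerInfo *root, RelOptInfo *rel)
{
    Path   *cheapest_partial;
    double  rows;

    if (rel->partial_pathlist == NIL)
        return;
    cheapest_partial = (Path *) linitial(rel->partial_pathlist);
    rows = cheapest_partial->rows * cheapest_partial->parallel_workers;

    add_path(rel, (Path *)
             create_gather_path(root, rel, cheapest_partial,
                                rel->reltarget,
                                NULL,       /* required_outer */
                                &rows));    /* row estimate hint */
}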

◆ create_group_path()

GroupPath * create_group_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *groupClause,
List *qual,
double  numGroups 
)

Definition at line 2885 of file pathnode.c.

2891{
2892 GroupPath *pathnode = makeNode(GroupPath);
2893 PathTarget *target = rel->reltarget;
2894
2895 pathnode->path.pathtype = T_Group;
2896 pathnode->path.parent = rel;
2897 pathnode->path.pathtarget = target;
2898 /* For now, assume we are above any joins, so no parameterization */
2899 pathnode->path.param_info = NULL;
2900 pathnode->path.parallel_aware = false;
2901 pathnode->path.parallel_safe = rel->consider_parallel &&
2902 subpath->parallel_safe;
2903 pathnode->path.parallel_workers = subpath->parallel_workers;
2904 /* Group doesn't change sort ordering */
2905 pathnode->path.pathkeys = subpath->pathkeys;
2906
2907 pathnode->subpath = subpath;
2908
2909 pathnode->groupClause = groupClause;
2910 pathnode->qual = qual;
2911
2912 cost_group(&pathnode->path, root,
2913 list_length(groupClause),
2914 numGroups,
2915 qual,
2916 subpath->disabled_nodes,
2917 subpath->startup_cost, subpath->total_cost,
2918 subpath->rows);
2919
2920 /* add tlist eval cost for each output row */
2921 pathnode->path.startup_cost += target->cost.startup;
2922 pathnode->path.total_cost += target->cost.startup +
2923 target->cost.per_tuple * pathnode->path.rows;
2924
2925 return pathnode;
2926}
void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, List *quals, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3201
List * qual
Definition: pathnodes.h:2458
List * groupClause
Definition: pathnodes.h:2457
Path * subpath
Definition: pathnodes.h:2456
Path path
Definition: pathnodes.h:2455

References RelOptInfo::consider_parallel, PathTarget::cost, cost_group(), GroupPath::groupClause, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupPath::qual, RelOptInfo::reltarget, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupPath::subpath, and Path::total_cost.

Referenced by add_paths_to_grouping_rel(), and create_partial_grouping_paths().

◆ create_group_result_path()

GroupResultPath * create_group_result_path ( PlannerInfo *root,
RelOptInfo *rel,
PathTarget *target,
List *havingqual 
)

Definition at line 1609 of file pathnode.c.

1611{
1612 GroupResultPath *pathnode = makeNode(GroupResultPath);
1613
1614 pathnode->path.pathtype = T_Result;
1615 pathnode->path.parent = rel;
1616 pathnode->path.pathtarget = target;
1617 pathnode->path.param_info = NULL; /* there are no other rels... */
1618 pathnode->path.parallel_aware = false;
1619 pathnode->path.parallel_safe = rel->consider_parallel;
1620 pathnode->path.parallel_workers = 0;
1621 pathnode->path.pathkeys = NIL;
1622 pathnode->quals = havingqual;
1623
1624 /*
1625 * We can't quite use cost_resultscan() because the quals we want to
1626 * account for are not baserestrict quals of the rel. Might as well just
1627 * hack it here.
1628 */
1629 pathnode->path.rows = 1;
1630 pathnode->path.startup_cost = target->cost.startup;
1631 pathnode->path.total_cost = target->cost.startup +
1632 cpu_tuple_cost + target->cost.per_tuple;
1633
1634 /*
1635 * Add cost of qual, if any --- but we ignore its selectivity, since our
1636 * rowcount estimate should be 1 no matter what the qual is.
1637 */
1638 if (havingqual)
1639 {
1640 QualCost qual_cost;
1641
1642 cost_qual_eval(&qual_cost, havingqual, root);
1643 /* havingqual is evaluated once at startup */
1644 pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1645 pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1646 }
1647
1648 return pathnode;
1649}
double cpu_tuple_cost
Definition: costsize.c:132
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4765

References RelOptInfo::consider_parallel, PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupResultPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupResultPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by create_degenerate_grouping_paths(), and query_planner().
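
A sketch of the query_planner() usage (the helper is hypothetical): when the jointree is empty, the scan/join phase can be represented by a single Result path that evaluates the target list once, optionally gated by a pseudoconstant qual list.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Hypothetical helper: the one-row Result path for an empty jointree. */
static void
add_empty_jointree_path(PlannerInfo *root, RelOptInfo *final_rel,
                        List *pseudoconstant_quals)
{
    add_path(final_rel, (Path *)
             create_group_result_path(root, final_rel,
                                      final_rel->reltarget,
                                      pseudoconstant_quals));
}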

◆ create_groupingsets_path()

GroupingSetsPath * create_groupingsets_path ( PlannerInfo *root,
RelOptInfo *rel,
Path *subpath,
List *having_qual,
AggStrategy  aggstrategy,
List *rollups,
const AggClauseCosts *agg_costs 
)

Definition at line 3076 of file pathnode.c.

3083{
3084 GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3085 PathTarget *target = rel->reltarget;
3086 ListCell *lc;
3087 bool is_first = true;
3088 bool is_first_sort = true;
3089
3090 /* The topmost generated Plan node will be an Agg */
3091 pathnode->path.pathtype = T_Agg;
3092 pathnode->path.parent = rel;
3093 pathnode->path.pathtarget = target;
3094 pathnode->path.param_info = subpath->param_info;
3095 pathnode->path.parallel_aware = false;
3096 pathnode->path.parallel_safe = rel->consider_parallel &&
3097 subpath->parallel_safe;
3098 pathnode->path.parallel_workers = subpath->parallel_workers;
3099 pathnode->subpath = subpath;
3100
3101 /*
3102 * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3103 * to AGG_HASHED, here if possible.
3104 */
3105 if (aggstrategy == AGG_SORTED &&
3106 list_length(rollups) == 1 &&
3107 ((RollupData *) linitial(rollups))->groupClause == NIL)
3108 aggstrategy = AGG_PLAIN;
3109
3110 if (aggstrategy == AGG_MIXED &&
3111 list_length(rollups) == 1)
3112 aggstrategy = AGG_HASHED;
3113
3114 /*
3115 * Output will be in sorted order by group_pathkeys if, and only if, there
3116 * is a single rollup operation on a non-empty list of grouping
3117 * expressions.
3118 */
3119 if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3120 pathnode->path.pathkeys = root->group_pathkeys;
3121 else
3122 pathnode->path.pathkeys = NIL;
3123
3124 pathnode->aggstrategy = aggstrategy;
3125 pathnode->rollups = rollups;
3126 pathnode->qual = having_qual;
3127 pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3128
3129 Assert(rollups != NIL);
3130 Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3131 Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3132
3133 foreach(lc, rollups)
3134 {
3135 RollupData *rollup = lfirst(lc);
3136 List *gsets = rollup->gsets;
3137 int numGroupCols = list_length(linitial(gsets));
3138
3139 /*
3140 * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3141 * (already-sorted) input, and following ones do their own sort.
3142 *
3143 * In AGG_HASHED mode, there is one rollup for each grouping set.
3144 *
3145 * In AGG_MIXED mode, the first rollups are hashed, the first
3146 * non-hashed one takes the (already-sorted) input, and following ones
3147 * do their own sort.
3148 */
3149 if (is_first)
3150 {
3151 cost_agg(&pathnode->path, root,
3152 aggstrategy,
3153 agg_costs,
3154 numGroupCols,
3155 rollup->numGroups,
3156 having_qual,
3157 subpath->disabled_nodes,
3158 subpath->startup_cost,
3159 subpath->total_cost,
3160 subpath->rows,
3161 subpath->pathtarget->width);
3162 is_first = false;
3163 if (!rollup->is_hashed)
3164 is_first_sort = false;
3165 }
3166 else
3167 {
3168 Path sort_path; /* dummy for result of cost_sort */
3169 Path agg_path; /* dummy for result of cost_agg */
3170
3171 if (rollup->is_hashed || is_first_sort)
3172 {
3173 /*
3174 * Account for cost of aggregation, but don't charge input
3175 * cost again
3176 */
3177 cost_agg(&agg_path, root,
3178 rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3179 agg_costs,
3180 numGroupCols,
3181 rollup->numGroups,
3182 having_qual,
3183 0, 0.0, 0.0,
3184 subpath->rows,
3185 subpath->pathtarget->width);
3186 if (!rollup->is_hashed)
3187 is_first_sort = false;
3188 }
3189 else
3190 {
3191 /* Account for cost of sort, but don't charge input cost again */
3192 cost_sort(&sort_path, root, NIL, 0,
3193 0.0,
3194 subpath->rows,
3195 subpath->pathtarget->width,
3196 0.0,
3197 work_mem,
3198 -1.0);
3199
3200 /* Account for cost of aggregation */
3201
3202 cost_agg(&agg_path, root,
3203 AGG_SORTED,
3204 agg_costs,
3205 numGroupCols,
3206 rollup->numGroups,
3207 having_qual,
3208 sort_path.disabled_nodes,
3209 sort_path.startup_cost,
3210 sort_path.total_cost,
3211 sort_path.rows,
3212 subpath->pathtarget->width);
3213 }
3214
3215 pathnode->path.disabled_nodes += agg_path.disabled_nodes;
3216 pathnode->path.total_cost += agg_path.total_cost;
3217 pathnode->path.rows += agg_path.rows;
3218 }
3219 }
3220
3221 /* add tlist eval cost for each output row */
3222 pathnode->path.startup_cost += target->cost.startup;
3223 pathnode->path.total_cost += target->cost.startup +
3224 target->cost.per_tuple * pathnode->path.rows;
3225
3226 return pathnode;
3227}
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2118
int work_mem
Definition: globals.c:131
@ AGG_HASHED
Definition: nodes.h:366
@ AGG_MIXED
Definition: nodes.h:367
@ AGG_PLAIN
Definition: nodes.h:364
uint64 transitionSpace
Definition: pathnodes.h:2530
AggStrategy aggstrategy
Definition: pathnodes.h:2527
Cardinality numGroups
Definition: pathnodes.h:2514
List * gsets
Definition: pathnodes.h:2512
bool is_hashed
Definition: pathnodes.h:2516

References AGG_HASHED, AGG_MIXED, AGG_PLAIN, AGG_SORTED, GroupingSetsPath::aggstrategy, Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cost_agg(), cost_sort(), Path::disabled_nodes, RollupData::gsets, RollupData::is_hashed, lfirst, linitial, list_length(), makeNode, NIL, RollupData::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, GroupingSetsPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, GroupingSetsPath::qual, RelOptInfo::reltarget, GroupingSetsPath::rollups, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), GroupingSetsPath::subpath, Path::total_cost, AggClauseCosts::transitionSpace, GroupingSetsPath::transitionSpace, and work_mem.

Referenced by consider_groupingsets_paths().
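
To make the rollup-costing loop above a bit more concrete, here is a small standalone C sketch (illustrative only, with invented cost numbers and a toy stand-in for Path; it is not the planner's code): the first rollup is costed on top of the already-charged input path, while each later rollup adds only its own aggregation cost, plus a sort cost when it cannot reuse the already-sorted input.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for the Path cost fields used above (illustrative only). */
typedef struct { double total_cost; double rows; } ToyPath;

int main(void)
{
    ToyPath input = {100.0, 1000.0};        /* pretend subpath costs */
    ToyPath result = {input.total_cost, 0.0};
    bool    rollup_is_hashed[] = {false, true, false};
    bool    is_first = true;

    for (int i = 0; i < 3; i++)
    {
        double agg_cost = 10.0;             /* pretend cost_agg() increment */
        double sort_cost = 25.0;            /* pretend cost_sort() increment */

        if (is_first)
        {
            /* first rollup: aggregation over the already-charged input */
            result.total_cost += agg_cost;
            is_first = false;
        }
        else if (rollup_is_hashed[i])
            result.total_cost += agg_cost;              /* hashed: no sort */
        else
            result.total_cost += sort_cost + agg_cost;  /* own sort, then agg */

        result.rows += 50.0;                /* pretend per-rollup group count */
    }
    printf("total_cost=%.1f rows=%.0f\n", result.total_cost, result.rows);
    return 0;
}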

◆ create_hashjoin_path()

HashPath * create_hashjoin_path ( PlannerInfo * root,
RelOptInfo * joinrel,
JoinType  jointype,
JoinCostWorkspace * workspace,
JoinPathExtraData * extra,
Path * outer_path,
Path * inner_path,
bool  parallel_hash,
List * restrict_clauses,
Relids  required_outer,
List * hashclauses
)

Definition at line 2458 of file pathnode.c.

2469{
2470 HashPath *pathnode = makeNode(HashPath);
2471
2472 pathnode->jpath.path.pathtype = T_HashJoin;
2473 pathnode->jpath.path.parent = joinrel;
2474 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2475 pathnode->jpath.path.param_info =
2476 get_joinrel_parampathinfo(root,
2477 joinrel,
2478 outer_path,
2479 inner_path,
2480 extra->sjinfo,
2481 required_outer,
2482 &restrict_clauses);
2483 pathnode->jpath.path.parallel_aware =
2484 joinrel->consider_parallel && parallel_hash;
2485 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2486 outer_path->parallel_safe && inner_path->parallel_safe;
2487 /* This is a foolish way to estimate parallel_workers, but for now... */
2488 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2489
2490 /*
2491 * A hashjoin never has pathkeys, since its output ordering is
2492 * unpredictable due to possible batching. XXX If the inner relation is
2493 * small enough, we could instruct the executor that it must not batch,
2494 * and then we could assume that the output inherits the outer relation's
2495 * ordering, which might save a sort step. However there is considerable
2496 * downside if our estimate of the inner relation size is badly off. For
2497 * the moment we don't risk it. (Note also that if we wanted to take this
2498 * seriously, joinpath.c would have to consider many more paths for the
2499 * outer rel than it does now.)
2500 */
2501 pathnode->jpath.path.pathkeys = NIL;
2502 pathnode->jpath.jointype = jointype;
2503 pathnode->jpath.inner_unique = extra->inner_unique;
2504 pathnode->jpath.outerjoinpath = outer_path;
2505 pathnode->jpath.innerjoinpath = inner_path;
2506 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2507 pathnode->path_hashclauses = hashclauses;
2508 /* final_cost_hashjoin will fill in pathnode->num_batches */
2509
2510 final_cost_hashjoin(root, pathnode, workspace, extra);
2511
2512 return pathnode;
2513}
void final_cost_hashjoin(PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:4283
ParamPathInfo * get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, Relids required_outer, List **restrict_clauses)
Definition: relnode.c:1781
List * path_hashclauses
Definition: pathnodes.h:2382
JoinPath jpath
Definition: pathnodes.h:2381
SpecialJoinInfo * sjinfo
Definition: pathnodes.h:3499
Path * outerjoinpath
Definition: pathnodes.h:2296
Path * innerjoinpath
Definition: pathnodes.h:2297
JoinType jointype
Definition: pathnodes.h:2291
bool inner_unique
Definition: pathnodes.h:2293
List * joinrestrictinfo
Definition: pathnodes.h:2299

References RelOptInfo::consider_parallel, final_cost_hashjoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, HashPath::jpath, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, HashPath::path_hashclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_hashjoin_path(), and try_partial_hashjoin_path().

◆ create_incremental_sort_path()

IncrementalSortPath * create_incremental_sort_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * pathkeys,
int  presorted_keys,
double  limit_tuples
)

Definition at line 2792 of file pathnode.c.

2798{
2799 IncrementalSortPath *sort = makeNode(IncrementalSortPath);
2800 SortPath *pathnode = &sort->spath;
2801
2802 pathnode->path.pathtype = T_IncrementalSort;
2803 pathnode->path.parent = rel;
2804 /* Sort doesn't project, so use source path's pathtarget */
2805 pathnode->path.pathtarget = subpath->pathtarget;
2806 pathnode->path.param_info = subpath->param_info;
2807 pathnode->path.parallel_aware = false;
2808 pathnode->path.parallel_safe = rel->consider_parallel &&
2809 subpath->parallel_safe;
2810 pathnode->path.parallel_workers = subpath->parallel_workers;
2811 pathnode->path.pathkeys = pathkeys;
2812
2813 pathnode->subpath = subpath;
2814
2815 cost_incremental_sort(&pathnode->path,
2816 root, pathkeys, presorted_keys,
2817 subpath->disabled_nodes,
2818 subpath->startup_cost,
2819 subpath->total_cost,
2820 subpath->rows,
2821 subpath->pathtarget->width,
2822 0.0, /* XXX comparison_cost shouldn't be 0? */
2823 work_mem, limit_tuples);
2824
2825 sort->nPresortedCols = presorted_keys;
2826
2827 return sort;
2828}
Datum sort(PG_FUNCTION_ARGS)
Definition: _int_op.c:198
void cost_incremental_sort(Path *path, PlannerInfo *root, List *pathkeys, int presorted_keys, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:1974
Path path
Definition: pathnodes.h:2429
Path * subpath
Definition: pathnodes.h:2430

References RelOptInfo::consider_parallel, cost_incremental_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, sort(), subpath(), SortPath::subpath, and work_mem.

Referenced by build_setop_child_paths(), create_final_unique_paths(), create_one_window_path(), create_ordered_paths(), create_partial_unique_paths(), gather_grouping_paths(), generate_grouped_paths(), generate_useful_gather_paths(), and make_ordered_path().
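
The presorted_keys argument is the number of leading pathkeys the input already satisfies; an incremental sort then only has to order the remaining keys within each group of equal leading values. A tiny standalone C sketch of that counting idea (a toy analogue of pathkeys_count_contained_in(); the helper name is invented):

#include <stdio.h>
#include <string.h>

/* Count how many leading sort keys of "required" are already provided by
 * "existing" -- a toy analogue of pathkeys_count_contained_in(). */
static int count_presorted(const char *required[], int nreq,
                           const char *existing[], int nexist)
{
    int n = 0;

    while (n < nreq && n < nexist && strcmp(required[n], existing[n]) == 0)
        n++;
    return n;
}

int main(void)
{
    const char *required[] = {"a", "b", "c"};
    const char *existing[] = {"a", "b"};

    /* Two presorted keys: an incremental sort only has to order by "c"
     * within each (a, b) group instead of sorting the whole input. */
    printf("presorted_keys = %d\n", count_presorted(required, 3, existing, 2));
    return 0;
}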

◆ create_index_path()

IndexPath * create_index_path ( PlannerInfo * root,
IndexOptInfo * index,
List * indexclauses,
List * indexorderbys,
List * indexorderbycols,
List * pathkeys,
ScanDirection  indexscandir,
bool  indexonly,
Relids  required_outer,
double  loop_count,
bool  partial_path
)

Definition at line 1049 of file pathnode.c.

1060{
1061 IndexPath *pathnode = makeNode(IndexPath);
1062 RelOptInfo *rel = index->rel;
1063
1064 pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1065 pathnode->path.parent = rel;
1066 pathnode->path.pathtarget = rel->reltarget;
1067 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1068 required_outer);
1069 pathnode->path.parallel_aware = false;
1070 pathnode->path.parallel_safe = rel->consider_parallel;
1071 pathnode->path.parallel_workers = 0;
1072 pathnode->path.pathkeys = pathkeys;
1073
1074 pathnode->indexinfo = index;
1075 pathnode->indexclauses = indexclauses;
1076 pathnode->indexorderbys = indexorderbys;
1077 pathnode->indexorderbycols = indexorderbycols;
1078 pathnode->indexscandir = indexscandir;
1079
1080 cost_index(pathnode, root, loop_count, partial_path);
1081
1082 return pathnode;
1083}
void cost_index(IndexPath *path, PlannerInfo *root, double loop_count, bool partial_path)
Definition: costsize.c:534
List * indexclauses
Definition: pathnodes.h:1959
ScanDirection indexscandir
Definition: pathnodes.h:1962
Path path
Definition: pathnodes.h:1957
List * indexorderbycols
Definition: pathnodes.h:1961
List * indexorderbys
Definition: pathnodes.h:1960
IndexOptInfo * indexinfo
Definition: pathnodes.h:1958
Definition: type.h:96

References RelOptInfo::consider_parallel, cost_index(), get_baserel_parampathinfo(), IndexPath::indexclauses, IndexPath::indexinfo, IndexPath::indexorderbycols, IndexPath::indexorderbys, IndexPath::indexscandir, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, IndexPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by build_index_paths(), and plan_cluster_use_sort().

◆ create_limit_path()

LimitPath * create_limit_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
Node * limitOffset,
Node * limitCount,
LimitOption  limitOption,
int64  offset_est,
int64  count_est
)

Definition at line 3729 of file pathnode.c.

3734{
3735 LimitPath *pathnode = makeNode(LimitPath);
3736
3737 pathnode->path.pathtype = T_Limit;
3738 pathnode->path.parent = rel;
3739 /* Limit doesn't project, so use source path's pathtarget */
3740 pathnode->path.pathtarget = subpath->pathtarget;
3741 /* For now, assume we are above any joins, so no parameterization */
3742 pathnode->path.param_info = NULL;
3743 pathnode->path.parallel_aware = false;
3744 pathnode->path.parallel_safe = rel->consider_parallel &&
3745 subpath->parallel_safe;
3746 pathnode->path.parallel_workers = subpath->parallel_workers;
3747 pathnode->path.rows = subpath->rows;
3748 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3749 pathnode->path.startup_cost = subpath->startup_cost;
3750 pathnode->path.total_cost = subpath->total_cost;
3751 pathnode->path.pathkeys = subpath->pathkeys;
3752 pathnode->subpath = subpath;
3753 pathnode->limitOffset = limitOffset;
3754 pathnode->limitCount = limitCount;
3755 pathnode->limitOption = limitOption;
3756
3757 /*
3758 * Adjust the output rows count and costs according to the offset/limit.
3759 */
3760 adjust_limit_rows_costs(&pathnode->path.rows,
3761 &pathnode->path.startup_cost,
3762 &pathnode->path.total_cost,
3763 offset_est, count_est);
3764
3765 return pathnode;
3766}
void adjust_limit_rows_costs(double *rows, Cost *startup_cost, Cost *total_cost, int64 offset_est, int64 count_est)
Definition: pathnode.c:3785
Path path
Definition: pathnodes.h:2628
Path * subpath
Definition: pathnodes.h:2629
LimitOption limitOption
Definition: pathnodes.h:2632
Node * limitOffset
Definition: pathnodes.h:2630
Node * limitCount
Definition: pathnodes.h:2631

References adjust_limit_rows_costs(), RelOptInfo::consider_parallel, Path::disabled_nodes, LimitPath::limitCount, LimitPath::limitOffset, LimitPath::limitOption, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LimitPath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), LimitPath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_partial_distinct_paths(), and grouping_planner().
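
adjust_limit_rows_costs() then scales the copied row count and costs for the OFFSET/LIMIT. A deliberately simplified, hypothetical version of that kind of proportional adjustment is sketched below as standalone C (the helper name and numbers are invented; the real pathnode.c function handles more cases):

#include <stdio.h>

/* Illustrative only: scale a path's row count and total cost for LIMIT/OFFSET,
 * assuming run cost accrues roughly linearly with the rows actually fetched. */
static void toy_limit_adjust(double *rows, double *total_cost,
                             double startup_cost,
                             long offset_est, long count_est)
{
    double in_rows = *rows;

    if (offset_est > 0)
        *rows = (*rows > offset_est) ? *rows - offset_est : 1.0;
    if (count_est > 0 && *rows > count_est)
        *rows = count_est;

    /* charge only the fraction of run cost needed to produce those rows */
    *total_cost = startup_cost +
        (*total_cost - startup_cost) * ((offset_est + *rows) / in_rows);
}

int main(void)
{
    double rows = 10000.0, total = 500.0, startup = 5.0;

    toy_limit_adjust(&rows, &total, startup, 100, 10);   /* OFFSET 100 LIMIT 10 */
    printf("rows=%.0f total_cost=%.2f\n", rows, total);
    return 0;
}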

◆ create_lockrows_path()

LockRowsPath * create_lockrows_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * rowMarks,
int  epqParam
)

Definition at line 3567 of file pathnode.c.

3569{
3570 LockRowsPath *pathnode = makeNode(LockRowsPath);
3571
3572 pathnode->path.pathtype = T_LockRows;
3573 pathnode->path.parent = rel;
3574 /* LockRows doesn't project, so use source path's pathtarget */
3575 pathnode->path.pathtarget = subpath->pathtarget;
3576 /* For now, assume we are above any joins, so no parameterization */
3577 pathnode->path.param_info = NULL;
3578 pathnode->path.parallel_aware = false;
3579 pathnode->path.parallel_safe = false;
3580 pathnode->path.parallel_workers = 0;
3581 pathnode->path.rows = subpath->rows;
3582
3583 /*
3584 * The result cannot be assumed sorted, since locking might cause the sort
3585 * key columns to be replaced with new values.
3586 */
3587 pathnode->path.pathkeys = NIL;
3588
3589 pathnode->subpath = subpath;
3590 pathnode->rowMarks = rowMarks;
3591 pathnode->epqParam = epqParam;
3592
3593 /*
3594 * We should charge something extra for the costs of row locking and
3595 * possible refetches, but it's hard to say how much. For now, use
3596 * cpu_tuple_cost per row.
3597 */
3598 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3599 pathnode->path.startup_cost = subpath->startup_cost;
3600 pathnode->path.total_cost = subpath->total_cost +
3601 cpu_tuple_cost * subpath->rows;
3602
3603 return pathnode;
3604}
Path * subpath
Definition: pathnodes.h:2590
List * rowMarks
Definition: pathnodes.h:2591

References cpu_tuple_cost, Path::disabled_nodes, LockRowsPath::epqParam, makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, LockRowsPath::path, Path::pathkeys, Path::pathtype, LockRowsPath::rowMarks, Path::rows, Path::startup_cost, subpath(), LockRowsPath::subpath, and Path::total_cost.

Referenced by grouping_planner().

◆ create_material_path()

MaterialPath * create_material_path ( RelOptInfo * rel,
Path * subpath
)

Definition at line 1657 of file pathnode.c.

1658{
1659 MaterialPath *pathnode = makeNode(MaterialPath);
1660
1661 Assert(subpath->parent == rel);
1662
1663 pathnode->path.pathtype = T_Material;
1664 pathnode->path.parent = rel;
1665 pathnode->path.pathtarget = rel->reltarget;
1666 pathnode->path.param_info = subpath->param_info;
1667 pathnode->path.parallel_aware = false;
1668 pathnode->path.parallel_safe = rel->consider_parallel &&
1669 subpath->parallel_safe;
1670 pathnode->path.parallel_workers = subpath->parallel_workers;
1671 pathnode->path.pathkeys = subpath->pathkeys;
1672
1673 pathnode->subpath = subpath;
1674
1675 cost_material(&pathnode->path,
1676 subpath->disabled_nodes,
1677 subpath->startup_cost,
1678 subpath->total_cost,
1679 subpath->rows,
1680 subpath->pathtarget->width);
1681
1682 return pathnode;
1683}
void cost_material(Path *path, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples, int width)
Definition: costsize.c:2483
Path * subpath
Definition: pathnodes.h:2230

References Assert(), RelOptInfo::consider_parallel, cost_material(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MaterialPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, subpath(), and MaterialPath::subpath.

Referenced by consider_parallel_nestloop(), match_unsorted_outer(), reparameterize_path(), and set_tablesample_rel_pathlist().

◆ create_memoize_path()

MemoizePath * create_memoize_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
List * param_exprs,
List * hash_operators,
bool  singlerow,
bool  binary_mode,
Cardinality  est_calls
)

Definition at line 1690 of file pathnode.c.

1693{
1694 MemoizePath *pathnode = makeNode(MemoizePath);
1695
1696 Assert(subpath->parent == rel);
1697
1698 pathnode->path.pathtype = T_Memoize;
1699 pathnode->path.parent = rel;
1700 pathnode->path.pathtarget = rel->reltarget;
1701 pathnode->path.param_info = subpath->param_info;
1702 pathnode->path.parallel_aware = false;
1703 pathnode->path.parallel_safe = rel->consider_parallel &&
1704 subpath->parallel_safe;
1705 pathnode->path.parallel_workers = subpath->parallel_workers;
1706 pathnode->path.pathkeys = subpath->pathkeys;
1707
1708 pathnode->subpath = subpath;
1709 pathnode->hash_operators = hash_operators;
1710 pathnode->param_exprs = param_exprs;
1711 pathnode->singlerow = singlerow;
1712 pathnode->binary_mode = binary_mode;
1713
1714 /*
1715 * For now we set est_entries to 0. cost_memoize_rescan() does all the
1716 * hard work to determine how many cache entries there are likely to be,
1717 * so it seems best to leave it up to that function to fill this field in.
1718 * If left at 0, the executor will make a guess at a good value.
1719 */
1720 pathnode->est_entries = 0;
1721
1722 pathnode->est_calls = clamp_row_est(est_calls);
1723
1724 /* These will also be set later in cost_memoize_rescan() */
1725 pathnode->est_unique_keys = 0.0;
1726 pathnode->est_hit_ratio = 0.0;
1727
1728 /* we should not generate this path type when enable_memoize=false */
1729 Assert(enable_memoize);
1730 pathnode->path.disabled_nodes = subpath->disabled_nodes;
1731
1732 /*
1733 * Add a small additional charge for caching the first entry. All the
1734 * harder calculations for rescans are performed in cost_memoize_rescan().
1735 */
1736 pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1737 pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1738 pathnode->path.rows = subpath->rows;
1739
1740 return pathnode;
1741}
bool enable_memoize
Definition: costsize.c:155
Cardinality est_calls
Definition: pathnodes.h:2251
bool singlerow
Definition: pathnodes.h:2244
List * hash_operators
Definition: pathnodes.h:2242
uint32 est_entries
Definition: pathnodes.h:2248
bool binary_mode
Definition: pathnodes.h:2246
double est_hit_ratio
Definition: pathnodes.h:2253
Cardinality est_unique_keys
Definition: pathnodes.h:2252
Path * subpath
Definition: pathnodes.h:2241
List * param_exprs
Definition: pathnodes.h:2243

References Assert(), MemoizePath::binary_mode, clamp_row_est(), RelOptInfo::consider_parallel, cpu_tuple_cost, Path::disabled_nodes, enable_memoize, MemoizePath::est_calls, MemoizePath::est_entries, MemoizePath::est_hit_ratio, MemoizePath::est_unique_keys, MemoizePath::hash_operators, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MemoizePath::param_exprs, MemoizePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, Path::rows, MemoizePath::singlerow, Path::startup_cost, subpath(), MemoizePath::subpath, and Path::total_cost.

Referenced by get_memoize_path(), and reparameterize_path().
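
Only a token cpu_tuple_cost is charged here; the interesting numbers (est_entries, est_unique_keys, est_hit_ratio) are filled in later by cost_memoize_rescan(). The flavor of that estimate, as a standalone C sketch with made-up inputs (the real function also accounts for cache-size limits and evictions):

#include <stdio.h>

int main(void)
{
    /* If the node is rescanned est_calls times with est_unique_keys distinct
     * parameter values and every entry fits in the cache, each distinct key
     * misses once and hits on every later call. */
    double est_calls = 1000.0;
    double est_unique_keys = 40.0;
    double est_hit_ratio = (est_calls - est_unique_keys) / est_calls;

    printf("expected hit ratio = %.3f\n", est_hit_ratio);
    return 0;
}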

◆ create_merge_append_path()

MergeAppendPath * create_merge_append_path ( PlannerInfo * root,
RelOptInfo * rel,
List * subpaths,
List * pathkeys,
Relids  required_outer
)

Definition at line 1471 of file pathnode.c.

1476{
1477 MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1478 int input_disabled_nodes;
1479 Cost input_startup_cost;
1480 Cost input_total_cost;
1481 ListCell *l;
1482
1483 /*
1484 * We don't currently support parameterized MergeAppend paths, as
1485 * explained in the comments for generate_orderedappend_paths.
1486 */
1487 Assert(bms_is_empty(rel->lateral_relids) && bms_is_empty(required_outer));
1488
1489 pathnode->path.pathtype = T_MergeAppend;
1490 pathnode->path.parent = rel;
1491 pathnode->path.pathtarget = rel->reltarget;
1492 pathnode->path.param_info = NULL;
1493 pathnode->path.parallel_aware = false;
1494 pathnode->path.parallel_safe = rel->consider_parallel;
1495 pathnode->path.parallel_workers = 0;
1496 pathnode->path.pathkeys = pathkeys;
1497 pathnode->subpaths = subpaths;
1498
1499 /*
1500 * Apply query-wide LIMIT if known and path is for sole base relation.
1501 * (Handling this at this low level is a bit klugy.)
1502 */
1503 if (bms_equal(rel->relids, root->all_query_rels))
1504 pathnode->limit_tuples = root->limit_tuples;
1505 else
1506 pathnode->limit_tuples = -1.0;
1507
1508 /*
1509 * Add up the sizes and costs of the input paths.
1510 */
1511 pathnode->path.rows = 0;
1512 input_disabled_nodes = 0;
1513 input_startup_cost = 0;
1514 input_total_cost = 0;
1515 foreach(l, subpaths)
1516 {
1517 Path *subpath = (Path *) lfirst(l);
1518 int presorted_keys;
1519 Path sort_path; /* dummy for result of
1520 * cost_sort/cost_incremental_sort */
1521
1522 /* All child paths should be unparameterized */
1523 Assert(bms_is_empty(PATH_REQ_OUTER(subpath)));
1524
1525 pathnode->path.rows += subpath->rows;
1526 pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1527 subpath->parallel_safe;
1528
1529 if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
1530 &presorted_keys))
1531 {
1532 /*
1533 * We'll need to insert a Sort node, so include costs for that. We
1534 * choose to use incremental sort if it is enabled and there are
1535 * presorted keys; otherwise we use full sort.
1536 *
1537 * We can use the parent's LIMIT if any, since we certainly won't
1538 * pull more than that many tuples from any child.
1539 */
1540 if (enable_incremental_sort && presorted_keys > 0)
1541 {
1542 cost_incremental_sort(&sort_path,
1543 root,
1544 pathkeys,
1545 presorted_keys,
1546 subpath->disabled_nodes,
1547 subpath->startup_cost,
1548 subpath->total_cost,
1549 subpath->rows,
1550 subpath->pathtarget->width,
1551 0.0,
1552 work_mem,
1553 pathnode->limit_tuples);
1554 }
1555 else
1556 {
1557 cost_sort(&sort_path,
1558 root,
1559 pathkeys,
1560 subpath->disabled_nodes,
1561 subpath->total_cost,
1562 subpath->rows,
1563 subpath->pathtarget->width,
1564 0.0,
1565 work_mem,
1566 pathnode->limit_tuples);
1567 }
1568
1569 subpath = &sort_path;
1570 }
1571
1572 input_disabled_nodes += subpath->disabled_nodes;
1573 input_startup_cost += subpath->startup_cost;
1574 input_total_cost += subpath->total_cost;
1575 }
1576
1577 /*
1578 * Now we can compute total costs of the MergeAppend. If there's exactly
1579 * one child path and its parallel awareness matches that of the
1580 * MergeAppend, then the MergeAppend is a no-op and will be discarded
1581 * later (in setrefs.c); otherwise we do the normal cost calculation.
1582 */
1583 if (list_length(subpaths) == 1 &&
1584 ((Path *) linitial(subpaths))->parallel_aware ==
1585 pathnode->path.parallel_aware)
1586 {
1587 pathnode->path.disabled_nodes = input_disabled_nodes;
1588 pathnode->path.startup_cost = input_startup_cost;
1589 pathnode->path.total_cost = input_total_cost;
1590 }
1591 else
1592 cost_merge_append(&pathnode->path, root,
1593 pathkeys, list_length(subpaths),
1594 input_disabled_nodes,
1595 input_startup_cost, input_total_cost,
1596 pathnode->path.rows);
1597
1598 return pathnode;
1599}
void cost_merge_append(Path *path, PlannerInfo *root, List *pathkeys, int n_streams, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double tuples)
Definition: costsize.c:2432
bool enable_incremental_sort
Definition: costsize.c:151
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:558
Cardinality limit_tuples
Definition: pathnodes.h:2205

References Assert(), bms_equal(), bms_is_empty, RelOptInfo::consider_parallel, cost_incremental_sort(), cost_merge_append(), cost_sort(), Path::disabled_nodes, enable_incremental_sort, RelOptInfo::lateral_relids, lfirst, MergeAppendPath::limit_tuples, linitial, list_length(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MergeAppendPath::path, PATH_REQ_OUTER, Path::pathkeys, pathkeys_count_contained_in(), Path::pathtype, RelOptInfo::relids, RelOptInfo::reltarget, root, Path::rows, Path::startup_cost, subpath(), MergeAppendPath::subpaths, Path::total_cost, and work_mem.

Referenced by generate_orderedappend_paths(), and generate_union_paths().
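
The per-child logic in the loop above boils down to a three-way choice, sketched here as standalone C (illustrative only; the helper name is invented): no sort if the child already delivers the required pathkeys, an incremental sort if that is enabled and some leading keys are presorted, and a full sort otherwise.

#include <stdio.h>
#include <stdbool.h>

/* Decide, per MergeAppend child, what kind of sort (if any) must be added. */
static const char *child_sort_choice(bool already_sorted, int presorted_keys,
                                     bool enable_incremental_sort)
{
    if (already_sorted)
        return "no sort needed";
    if (enable_incremental_sort && presorted_keys > 0)
        return "incremental sort";
    return "full sort";
}

int main(void)
{
    printf("%s\n", child_sort_choice(true, 0, true));
    printf("%s\n", child_sort_choice(false, 2, true));
    printf("%s\n", child_sort_choice(false, 0, true));
    return 0;
}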

◆ create_mergejoin_path()

MergePath * create_mergejoin_path ( PlannerInfo * root,
RelOptInfo * joinrel,
JoinType  jointype,
JoinCostWorkspace * workspace,
JoinPathExtraData * extra,
Path * outer_path,
Path * inner_path,
List * restrict_clauses,
List * pathkeys,
Relids  required_outer,
List * mergeclauses,
List * outersortkeys,
List * innersortkeys,
int  outer_presorted_keys
)

Definition at line 2390 of file pathnode.c.

2404{
2405 MergePath *pathnode = makeNode(MergePath);
2406
2407 pathnode->jpath.path.pathtype = T_MergeJoin;
2408 pathnode->jpath.path.parent = joinrel;
2409 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2410 pathnode->jpath.path.param_info =
2411 get_joinrel_parampathinfo(root,
2412 joinrel,
2413 outer_path,
2414 inner_path,
2415 extra->sjinfo,
2416 required_outer,
2417 &restrict_clauses);
2418 pathnode->jpath.path.parallel_aware = false;
2419 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2420 outer_path->parallel_safe && inner_path->parallel_safe;
2421 /* This is a foolish way to estimate parallel_workers, but for now... */
2422 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2423 pathnode->jpath.path.pathkeys = pathkeys;
2424 pathnode->jpath.jointype = jointype;
2425 pathnode->jpath.inner_unique = extra->inner_unique;
2426 pathnode->jpath.outerjoinpath = outer_path;
2427 pathnode->jpath.innerjoinpath = inner_path;
2428 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2429 pathnode->path_mergeclauses = mergeclauses;
2430 pathnode->outersortkeys = outersortkeys;
2431 pathnode->innersortkeys = innersortkeys;
2432 pathnode->outer_presorted_keys = outer_presorted_keys;
2433 /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2434 /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2435
2436 final_cost_mergejoin(root, pathnode, workspace, extra);
2437
2438 return pathnode;
2439}
void final_cost_mergejoin(PlannerInfo *root, MergePath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3843
List * outersortkeys
Definition: pathnodes.h:2362
List * innersortkeys
Definition: pathnodes.h:2363
JoinPath jpath
Definition: pathnodes.h:2360
int outer_presorted_keys
Definition: pathnodes.h:2364
List * path_mergeclauses
Definition: pathnodes.h:2361

References RelOptInfo::consider_parallel, final_cost_mergejoin(), get_joinrel_parampathinfo(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, MergePath::innersortkeys, JoinPath::joinrestrictinfo, JoinPath::jointype, MergePath::jpath, makeNode, MergePath::outer_presorted_keys, JoinPath::outerjoinpath, MergePath::outersortkeys, Path::parallel_safe, Path::parallel_workers, MergePath::path_mergeclauses, RelOptInfo::reltarget, root, and JoinPathExtraData::sjinfo.

Referenced by try_mergejoin_path(), and try_partial_mergejoin_path().

◆ create_minmaxagg_path()

MinMaxAggPath * create_minmaxagg_path ( PlannerInfo * root,
RelOptInfo * rel,
PathTarget * target,
List * mmaggregates,
List * quals
)

Definition at line 3239 of file pathnode.c.

3244{
3245 MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3246 Cost initplan_cost;
3247 int initplan_disabled_nodes = 0;
3248 ListCell *lc;
3249
3250 /* The topmost generated Plan node will be a Result */
3251 pathnode->path.pathtype = T_Result;
3252 pathnode->path.parent = rel;
3253 pathnode->path.pathtarget = target;
3254 /* For now, assume we are above any joins, so no parameterization */
3255 pathnode->path.param_info = NULL;
3256 pathnode->path.parallel_aware = false;
3257 pathnode->path.parallel_safe = true; /* might change below */
3258 pathnode->path.parallel_workers = 0;
3259 /* Result is one unordered row */
3260 pathnode->path.rows = 1;
3261 pathnode->path.pathkeys = NIL;
3262
3263 pathnode->mmaggregates = mmaggregates;
3264 pathnode->quals = quals;
3265
3266 /* Calculate cost of all the initplans, and check parallel safety */
3267 initplan_cost = 0;
3268 foreach(lc, mmaggregates)
3269 {
3270 MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3271
3272 initplan_disabled_nodes += mminfo->path->disabled_nodes;
3273 initplan_cost += mminfo->pathcost;
3274 if (!mminfo->path->parallel_safe)
3275 pathnode->path.parallel_safe = false;
3276 }
3277
3278 /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3279 pathnode->path.disabled_nodes = initplan_disabled_nodes;
3280 pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3281 pathnode->path.total_cost = initplan_cost + target->cost.startup +
3282 target->cost.per_tuple + cpu_tuple_cost;
3283
3284 /*
3285 * Add cost of qual, if any --- but we ignore its selectivity, since our
3286 * rowcount estimate should be 1 no matter what the qual is.
3287 */
3288 if (quals)
3289 {
3290 QualCost qual_cost;
3291
3292 cost_qual_eval(&qual_cost, quals, root);
3293 pathnode->path.startup_cost += qual_cost.startup;
3294 pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3295 }
3296
3297 /*
3298 * If the initplans were all parallel-safe, also check safety of the
3299 * target and quals. (The Result node itself isn't parallelizable, but if
3300 * we are in a subquery then it can be useful for the outer query to know
3301 * that this one is parallel-safe.)
3302 */
3303 if (pathnode->path.parallel_safe)
3304 pathnode->path.parallel_safe =
3305 is_parallel_safe(root, (Node *) target->exprs) &&
3306 is_parallel_safe(root, (Node *) quals);
3307
3308 return pathnode;
3309}
List * quals
Definition: pathnodes.h:2540
List * mmaggregates
Definition: pathnodes.h:2539

References PathTarget::cost, cost_qual_eval(), cpu_tuple_cost, Path::disabled_nodes, PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, MinMaxAggPath::mmaggregates, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, MinMaxAggPath::path, MinMaxAggInfo::path, MinMaxAggInfo::pathcost, Path::pathkeys, Path::pathtype, QualCost::per_tuple, MinMaxAggPath::quals, root, Path::rows, QualCost::startup, Path::startup_cost, and Path::total_cost.

Referenced by preprocess_minmax_aggregates().
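
A standalone C sketch of the costing above, with invented numbers (0.01 is simply the default cpu_tuple_cost setting): the Result node's cost is the sum of the per-aggregate initplan costs plus target evaluation for its single output row, with qual cost added on top when quals are present.

#include <stdio.h>

int main(void)
{
    double cpu_tuple_cost = 0.01;            /* default GUC value */
    double initplan_costs[] = {4.3, 4.3};    /* pretend per-aggregate path costs */
    double target_startup = 0.0, target_per_tuple = 0.005;
    double initplan_cost = 0.0;

    for (int i = 0; i < 2; i++)
        initplan_cost += initplan_costs[i];

    double startup = initplan_cost + target_startup;
    double total = startup + target_per_tuple + cpu_tuple_cost;

    printf("rows=1 startup=%.2f total=%.2f\n", startup, total);
    return 0;
}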

◆ create_modifytable_path()

ModifyTablePath * create_modifytable_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
CmdType  operation,
bool  canSetTag,
Index  nominalRelation,
Index  rootRelation,
List * resultRelations,
List * updateColnosLists,
List * withCheckOptionLists,
List * returningLists,
List * rowMarks,
OnConflictExpr * onconflict,
List * mergeActionLists,
List * mergeJoinConditions,
int  epqParam
)

Definition at line 3629 of file pathnode.c.

3639{
3640 ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3641
3642 Assert(operation == CMD_MERGE ||
3643 (operation == CMD_UPDATE ?
3644 list_length(resultRelations) == list_length(updateColnosLists) :
3645 updateColnosLists == NIL));
3646 Assert(withCheckOptionLists == NIL ||
3647 list_length(resultRelations) == list_length(withCheckOptionLists));
3648 Assert(returningLists == NIL ||
3649 list_length(resultRelations) == list_length(returningLists));
3650
3651 pathnode->path.pathtype = T_ModifyTable;
3652 pathnode->path.parent = rel;
3653 /* pathtarget is not interesting, just make it minimally valid */
3654 pathnode->path.pathtarget = rel->reltarget;
3655 /* For now, assume we are above any joins, so no parameterization */
3656 pathnode->path.param_info = NULL;
3657 pathnode->path.parallel_aware = false;
3658 pathnode->path.parallel_safe = false;
3659 pathnode->path.parallel_workers = 0;
3660 pathnode->path.pathkeys = NIL;
3661
3662 /*
3663 * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3664 *
3665 * Currently, we don't charge anything extra for the actual table
3666 * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3667 * expressions if any. It would only be window dressing, since
3668 * ModifyTable is always a top-level node and there is no way for the
3669 * costs to change any higher-level planning choices. But we might want
3670 * to make it look better sometime.
3671 */
3672 pathnode->path.disabled_nodes = subpath->disabled_nodes;
3673 pathnode->path.startup_cost = subpath->startup_cost;
3674 pathnode->path.total_cost = subpath->total_cost;
3675 if (returningLists != NIL)
3676 {
3677 pathnode->path.rows = subpath->rows;
3678
3679 /*
3680 * Set width to match the subpath output. XXX this is totally wrong:
3681 * we should return an average of the RETURNING tlist widths. But
3682 * it's what happened historically, and improving it is a task for
3683 * another day. (Again, it's mostly window dressing.)
3684 */
3685 pathnode->path.pathtarget->width = subpath->pathtarget->width;
3686 }
3687 else
3688 {
3689 pathnode->path.rows = 0;
3690 pathnode->path.pathtarget->width = 0;
3691 }
3692
3693 pathnode->subpath = subpath;
3694 pathnode->operation = operation;
3695 pathnode->canSetTag = canSetTag;
3696 pathnode->nominalRelation = nominalRelation;
3697 pathnode->rootRelation = rootRelation;
3698 pathnode->resultRelations = resultRelations;
3699 pathnode->updateColnosLists = updateColnosLists;
3700 pathnode->withCheckOptionLists = withCheckOptionLists;
3701 pathnode->returningLists = returningLists;
3702 pathnode->rowMarks = rowMarks;
3703 pathnode->onconflict = onconflict;
3704 pathnode->epqParam = epqParam;
3705 pathnode->mergeActionLists = mergeActionLists;
3706 pathnode->mergeJoinConditions = mergeJoinConditions;
3707
3708 return pathnode;
3709}
@ CMD_MERGE
Definition: nodes.h:279
@ CMD_UPDATE
Definition: nodes.h:276
List * returningLists
Definition: pathnodes.h:2613
List * resultRelations
Definition: pathnodes.h:2610
List * withCheckOptionLists
Definition: pathnodes.h:2612
List * mergeJoinConditions
Definition: pathnodes.h:2619
List * updateColnosLists
Definition: pathnodes.h:2611
OnConflictExpr * onconflict
Definition: pathnodes.h:2615
CmdType operation
Definition: pathnodes.h:2606
Index rootRelation
Definition: pathnodes.h:2609
Index nominalRelation
Definition: pathnodes.h:2608
List * mergeActionLists
Definition: pathnodes.h:2617

References Assert(), ModifyTablePath::canSetTag, CMD_MERGE, CMD_UPDATE, Path::disabled_nodes, ModifyTablePath::epqParam, list_length(), makeNode, ModifyTablePath::mergeActionLists, ModifyTablePath::mergeJoinConditions, NIL, ModifyTablePath::nominalRelation, ModifyTablePath::onconflict, ModifyTablePath::operation, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ModifyTablePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, ModifyTablePath::resultRelations, ModifyTablePath::returningLists, ModifyTablePath::rootRelation, ModifyTablePath::rowMarks, Path::rows, Path::startup_cost, subpath(), ModifyTablePath::subpath, Path::total_cost, ModifyTablePath::updateColnosLists, and ModifyTablePath::withCheckOptionLists.

Referenced by grouping_planner().

◆ create_namedtuplestorescan_path()

Path * create_namedtuplestorescan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer
)

Definition at line 1980 of file pathnode.c.

1982{
1983 Path *pathnode = makeNode(Path);
1984
1985 pathnode->pathtype = T_NamedTuplestoreScan;
1986 pathnode->parent = rel;
1987 pathnode->pathtarget = rel->reltarget;
1988 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1989 required_outer);
1990 pathnode->parallel_aware = false;
1991 pathnode->parallel_safe = rel->consider_parallel;
1992 pathnode->parallel_workers = 0;
1993 pathnode->pathkeys = NIL; /* result is always unordered */
1994
1995 cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
1996
1997 return pathnode;
1998}
void cost_namedtuplestorescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1724

References RelOptInfo::consider_parallel, cost_namedtuplestorescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_namedtuplestore_pathlist().

◆ create_nestloop_path()

NestPath * create_nestloop_path ( PlannerInfo * root,
RelOptInfo * joinrel,
JoinType  jointype,
JoinCostWorkspace * workspace,
JoinPathExtraData * extra,
Path * outer_path,
Path * inner_path,
List * restrict_clauses,
List * pathkeys,
Relids  required_outer
)

Definition at line 2293 of file pathnode.c.

2303{
2304 NestPath *pathnode = makeNode(NestPath);
2305 Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2306 Relids outerrelids;
2307
2308 /*
2309 * Paths are parameterized by top-level parents, so run parameterization
2310 * tests on the parent relids.
2311 */
2312 if (outer_path->parent->top_parent_relids)
2313 outerrelids = outer_path->parent->top_parent_relids;
2314 else
2315 outerrelids = outer_path->parent->relids;
2316
2317 /*
2318 * If the inner path is parameterized by the outer, we must drop any
2319 * restrict_clauses that are due to be moved into the inner path. We have
2320 * to do this now, rather than postpone the work till createplan time,
2321 * because the restrict_clauses list can affect the size and cost
2322 * estimates for this path. We detect such clauses by checking for serial
2323 * number match to clauses already enforced in the inner path.
2324 */
2325 if (bms_overlap(inner_req_outer, outerrelids))
2326 {
2327 Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2328 List *jclauses = NIL;
2329 ListCell *lc;
2330
2331 foreach(lc, restrict_clauses)
2332 {
2333 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2334
2335 if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2336 jclauses = lappend(jclauses, rinfo);
2337 }
2338 restrict_clauses = jclauses;
2339 }
2340
2341 pathnode->jpath.path.pathtype = T_NestLoop;
2342 pathnode->jpath.path.parent = joinrel;
2343 pathnode->jpath.path.pathtarget = joinrel->reltarget;
2344 pathnode->jpath.path.param_info =
2345 get_joinrel_parampathinfo(root,
2346 joinrel,
2347 outer_path,
2348 inner_path,
2349 extra->sjinfo,
2350 required_outer,
2351 &restrict_clauses);
2352 pathnode->jpath.path.parallel_aware = false;
2353 pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2354 outer_path->parallel_safe && inner_path->parallel_safe;
2355 /* This is a foolish way to estimate parallel_workers, but for now... */
2356 pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2357 pathnode->jpath.path.pathkeys = pathkeys;
2358 pathnode->jpath.jointype = jointype;
2359 pathnode->jpath.inner_unique = extra->inner_unique;
2360 pathnode->jpath.outerjoinpath = outer_path;
2361 pathnode->jpath.innerjoinpath = inner_path;
2362 pathnode->jpath.joinrestrictinfo = restrict_clauses;
2363
2364 final_cost_nestloop(root, pathnode, workspace, extra);
2365
2366 return pathnode;
2367}
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
void final_cost_nestloop(PlannerInfo *root, NestPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra)
Definition: costsize.c:3355
Bitmapset * get_param_path_clause_serials(Path *path)
Definition: relnode.c:2032
JoinPath jpath
Definition: pathnodes.h:2314
int rinfo_serial
Definition: pathnodes.h:2864

References bms_is_member(), bms_overlap(), RelOptInfo::consider_parallel, final_cost_nestloop(), get_joinrel_parampathinfo(), get_param_path_clause_serials(), JoinPath::inner_unique, JoinPathExtraData::inner_unique, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, JoinPath::jointype, NestPath::jpath, lappend(), lfirst, makeNode, NIL, JoinPath::outerjoinpath, Path::parallel_safe, Path::parallel_workers, PATH_REQ_OUTER, RelOptInfo::reltarget, RestrictInfo::rinfo_serial, root, and JoinPathExtraData::sjinfo.

Referenced by try_nestloop_path(), and try_partial_nestloop_path().
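
The clause-filtering step above can be pictured with a standalone C sketch (illustrative only; the helper name and serial numbers are invented): join clauses whose serial numbers are already enforced inside the parameterized inner path are dropped, mirroring the rinfo_serial / get_param_path_clause_serials() check.

#include <stdio.h>
#include <stdbool.h>

/* Report whether a clause's serial number is already enforced below. */
static bool serial_is_enforced(int serial, const int *enforced, int n)
{
    for (int i = 0; i < n; i++)
        if (enforced[i] == serial)
            return true;
    return false;
}

int main(void)
{
    int clause_serials[] = {7, 9, 12};   /* serials of candidate join clauses */
    int enforced[] = {9};                /* already applied inside inner path */

    for (int i = 0; i < 3; i++)
        if (!serial_is_enforced(clause_serials[i], enforced, 1))
            printf("keep clause with serial %d\n", clause_serials[i]);
    return 0;
}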

◆ create_projection_path()

ProjectionPath * create_projection_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
PathTarget * target
)

Definition at line 2524 of file pathnode.c.

2528{
2529 ProjectionPath *pathnode = makeNode(ProjectionPath);
2530 PathTarget *oldtarget;
2531
2532 /*
2533 * We mustn't put a ProjectionPath directly above another; it's useless
2534 * and will confuse create_projection_plan. Rather than making sure all
2535 * callers handle that, let's implement it here, by stripping off any
2536 * ProjectionPath in what we're given. Given this rule, there won't be
2537 * more than one.
2538 */
2539 if (IsA(subpath, ProjectionPath))
2540 {
2541 ProjectionPath *subpp = (ProjectionPath *) subpath;
2542
2543 Assert(subpp->path.parent == rel);
2544 subpath = subpp->subpath;
2545 Assert(!IsA(subpath, ProjectionPath));
2546 }
2547
2548 pathnode->path.pathtype = T_Result;
2549 pathnode->path.parent = rel;
2550 pathnode->path.pathtarget = target;
2551 pathnode->path.param_info = subpath->param_info;
2552 pathnode->path.parallel_aware = false;
2553 pathnode->path.parallel_safe = rel->consider_parallel &&
2554 subpath->parallel_safe &&
2555 is_parallel_safe(root, (Node *) target->exprs);
2556 pathnode->path.parallel_workers = subpath->parallel_workers;
2557 /* Projection does not change the sort order */
2558 pathnode->path.pathkeys = subpath->pathkeys;
2559
2560 pathnode->subpath = subpath;
2561
2562 /*
2563 * We might not need a separate Result node. If the input plan node type
2564 * can project, we can just tell it to project something else. Or, if it
2565 * can't project but the desired target has the same expression list as
2566 * what the input will produce anyway, we can still give it the desired
2567 * tlist (possibly changing its ressortgroupref labels, but nothing else).
2568 * Note: in the latter case, create_projection_plan has to recheck our
2569 * conclusion; see comments therein.
2570 */
2571 oldtarget = subpath->pathtarget;
2572 if (is_projection_capable_path(subpath) ||
2573 equal(oldtarget->exprs, target->exprs))
2574 {
2575 /* No separate Result node needed */
2576 pathnode->dummypp = true;
2577
2578 /*
2579 * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2580 */
2581 pathnode->path.rows = subpath->rows;
2582 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2583 pathnode->path.startup_cost = subpath->startup_cost +
2584 (target->cost.startup - oldtarget->cost.startup);
2585 pathnode->path.total_cost = subpath->total_cost +
2586 (target->cost.startup - oldtarget->cost.startup) +
2587 (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2588 }
2589 else
2590 {
2591 /* We really do need the Result node */
2592 pathnode->dummypp = false;
2593
2594 /*
2595 * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2596 * evaluating the tlist. There is no qual to worry about.
2597 */
2598 pathnode->path.rows = subpath->rows;
2599 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2600 pathnode->path.startup_cost = subpath->startup_cost +
2601 target->cost.startup;
2602 pathnode->path.total_cost = subpath->total_cost +
2603 target->cost.startup +
2604 (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2605 }
2606
2607 return pathnode;
2608}
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
Path * subpath
Definition: pathnodes.h:2404

References Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, Path::disabled_nodes, ProjectionPath::dummypp, equal(), PathTarget::exprs, is_parallel_safe(), is_projection_capable_path(), IsA, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectionPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectionPath::subpath, and Path::total_cost.

Referenced by add_paths_with_pathkeys_for_rel(), adjust_paths_for_srfs(), apply_projection_to_path(), apply_scanjoin_target_to_paths(), create_final_unique_paths(), create_partial_grouping_paths(), create_partial_unique_paths(), generate_grouped_paths(), and recurse_set_operations().
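
A standalone C sketch of the two costing branches above, with invented numbers (0.01 is the default cpu_tuple_cost): when the projection can be absorbed into the subpath ("dummypp"), only the difference in target-evaluation cost is charged; otherwise a Result node adds cpu_tuple_cost per row on top of the new target's per-tuple cost.

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
    double cpu_tuple_cost = 0.01;           /* default GUC value */
    double rows = 1000.0, sub_total = 120.0;
    double old_per_tuple = 0.0025, new_per_tuple = 0.0050;
    bool   dummypp = true;                  /* no separate Result node needed */

    double total = dummypp
        ? sub_total + (new_per_tuple - old_per_tuple) * rows
        : sub_total + (cpu_tuple_cost + new_per_tuple) * rows;

    printf("total_cost = %.2f\n", total);
    return 0;
}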

◆ create_recursiveunion_path()

RecursiveUnionPath * create_recursiveunion_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * leftpath,
Path * rightpath,
PathTarget * target,
List * distinctList,
int  wtParam,
double  numGroups
)

Definition at line 3522 of file pathnode.c.

3530{
3531 RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3532
3533 pathnode->path.pathtype = T_RecursiveUnion;
3534 pathnode->path.parent = rel;
3535 pathnode->path.pathtarget = target;
3536 /* For now, assume we are above any joins, so no parameterization */
3537 pathnode->path.param_info = NULL;
3538 pathnode->path.parallel_aware = false;
3539 pathnode->path.parallel_safe = rel->consider_parallel &&
3540 leftpath->parallel_safe && rightpath->parallel_safe;
3541 /* Foolish, but we'll do it like joins for now: */
3542 pathnode->path.parallel_workers = leftpath->parallel_workers;
3543 /* RecursiveUnion result is always unsorted */
3544 pathnode->path.pathkeys = NIL;
3545
3546 pathnode->leftpath = leftpath;
3547 pathnode->rightpath = rightpath;
3548 pathnode->distinctList = distinctList;
3549 pathnode->wtParam = wtParam;
3550 pathnode->numGroups = numGroups;
3551
3552 cost_recursive_union(&pathnode->path, leftpath, rightpath);
3553
3554 return pathnode;
3555}
void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
Definition: costsize.c:1800
Cardinality numGroups
Definition: pathnodes.h:2581

References RelOptInfo::consider_parallel, cost_recursive_union(), RecursiveUnionPath::distinctList, RecursiveUnionPath::leftpath, makeNode, NIL, RecursiveUnionPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, RecursiveUnionPath::path, Path::pathkeys, Path::pathtype, RecursiveUnionPath::rightpath, and RecursiveUnionPath::wtParam.

Referenced by generate_recursion_path().

◆ create_rel_agg_info()

RelAggInfo * create_rel_agg_info ( PlannerInfo * root,
RelOptInfo * rel,
bool  calculate_grouped_rows
)

Definition at line 2655 of file relnode.c.

2657{
2658 ListCell *lc;
2659 RelAggInfo *result;
2660 PathTarget *agg_input;
2661 PathTarget *target;
2662 List *group_clauses = NIL;
2663 List *group_exprs = NIL;
2664
2665 /*
2666 * The lists of aggregate expressions and grouping expressions should have
2667 * been constructed.
2668 */
2669 Assert(root->agg_clause_list != NIL);
2670 Assert(root->group_expr_list != NIL);
2671
2672 /*
2673 * If this is a child rel, the grouped rel for its parent rel must have
2674 * been created if it can. So we can just use parent's RelAggInfo if
2675 * there is one, with appropriate variable substitutions.
2676 */
2677 if (IS_OTHER_REL(rel))
2678 {
2679 RelOptInfo *grouped_rel;
2680 RelAggInfo *agg_info;
2681
2682 grouped_rel = rel->top_parent->grouped_rel;
2683 if (grouped_rel == NULL)
2684 return NULL;
2685
2686 Assert(IS_GROUPED_REL(grouped_rel));
2687
2688 /* Must do multi-level transformation */
2689 agg_info = (RelAggInfo *)
2690 adjust_appendrel_attrs_multilevel(root,
2691 (Node *) grouped_rel->agg_info,
2692 rel,
2693 rel->top_parent);
2694
2695 agg_info->apply_agg_at = NULL; /* caller will change this later */
2696
2697 if (calculate_grouped_rows)
2698 {
2699 agg_info->grouped_rows =
2700 estimate_num_groups(root, agg_info->group_exprs,
2701 rel->rows, NULL, NULL);
2702
2703 /*
2704 * The grouped paths for the given relation are considered useful
2705 * iff the average group size is no less than
2706 * min_eager_agg_group_size.
2707 */
2708 agg_info->agg_useful =
2709 (rel->rows / agg_info->grouped_rows) >= min_eager_agg_group_size;
2710 }
2711
2712 return agg_info;
2713 }
2714
2715 /* Check if it's possible to produce grouped paths for this relation. */
2716 if (!eager_aggregation_possible_for_relation(root, rel))
2717 return NULL;
2718
2719 /*
2720 * Create targets for the grouped paths and for the input paths of the
2721 * grouped paths.
2722 */
2723 target = create_empty_pathtarget();
2724 agg_input = create_empty_pathtarget();
2725
2726 /* ... and initialize these targets */
2727 if (!init_grouping_targets(root, rel, target, agg_input,
2728 &group_clauses, &group_exprs))
2729 return NULL;
2730
2731 /*
2732 * Eager aggregation is not applicable if there are no available grouping
2733 * expressions.
2734 */
2735 if (group_clauses == NIL)
2736 return NULL;
2737
2738 /* Add aggregates to the grouping target */
2739 foreach(lc, root->agg_clause_list)
2740 {
2741 AggClauseInfo *ac_info = lfirst_node(AggClauseInfo, lc);
2742 Aggref *aggref;
2743
2744 Assert(IsA(ac_info->aggref, Aggref));
2745
2746 aggref = (Aggref *) copyObject(ac_info->aggref);
2747 mark_partial_aggref(aggref, AGGSPLIT_INITIAL_SERIAL);
2748
2749 add_column_to_pathtarget(target, (Expr *) aggref, 0);
2750 }
2751
2752 /* Set the estimated eval cost and output width for both targets */
2753 set_pathtarget_cost_width(root, target);
2754 set_pathtarget_cost_width(root, agg_input);
2755
2756 /* build the RelAggInfo result */
2757 result = makeNode(RelAggInfo);
2758 result->target = target;
2759 result->agg_input = agg_input;
2760 result->group_clauses = group_clauses;
2761 result->group_exprs = group_exprs;
2762 result->apply_agg_at = NULL; /* caller will change this later */
2763
2764 if (calculate_grouped_rows)
2765 {
2766 result->grouped_rows = estimate_num_groups(root, group_exprs,
2767 rel->rows, NULL, NULL);
2768
2769 /*
2770 * The grouped paths for the given relation are considered useful iff
2771 * the average group size is no less than min_eager_agg_group_size.
2772 */
2773 result->agg_useful =
2774 (rel->rows / result->grouped_rows) >= min_eager_agg_group_size;
2775 }
2776
2777 return result;
2778}
double min_eager_agg_group_size
Definition: allpaths.c:84
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:592
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6376
#define copyObject(obj)
Definition: nodes.h:232
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:389
#define IS_GROUPED_REL(rel)
Definition: pathnodes.h:1161
#define lfirst_node(type, lc)
Definition: pg_list.h:176
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5762
static bool init_grouping_targets(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, PathTarget *agg_input, List **group_clauses, List **group_exprs)
Definition: relnode.c:2865
static bool eager_aggregation_possible_for_relation(PlannerInfo *root, RelOptInfo *rel)
Definition: relnode.c:2785
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3768
Aggref * aggref
Definition: pathnodes.h:3373
List * group_exprs
Definition: pathnodes.h:1203
List * group_clauses
Definition: pathnodes.h:1201
struct PathTarget * agg_input
Definition: pathnodes.h:1198
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695

References add_column_to_pathtarget(), adjust_appendrel_attrs_multilevel(), RelOptInfo::agg_info, RelAggInfo::agg_input, RelAggInfo::agg_useful, AggClauseInfo::aggref, AGGSPLIT_INITIAL_SERIAL, RelAggInfo::apply_agg_at, Assert(), copyObject, create_empty_pathtarget(), eager_aggregation_possible_for_relation(), estimate_num_groups(), RelAggInfo::group_clauses, RelAggInfo::group_exprs, RelOptInfo::grouped_rel, RelAggInfo::grouped_rows, init_grouping_targets(), IS_GROUPED_REL, IS_OTHER_REL, IsA, lfirst_node, makeNode, mark_partial_aggref(), min_eager_agg_group_size, NIL, root, RelOptInfo::rows, set_pathtarget_cost_width(), and RelAggInfo::target.

Referenced by build_simple_grouped_rel(), and make_grouped_join_rel().
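
The agg_useful test above compares the average group size against min_eager_agg_group_size. A standalone C sketch with made-up numbers (the setting value shown is hypothetical):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
    double rel_rows = 100000.0;
    double grouped_rows = 2000.0;            /* pretend estimate_num_groups() result */
    double min_eager_agg_group_size = 8.0;   /* hypothetical setting */

    bool agg_useful = (rel_rows / grouped_rows) >= min_eager_agg_group_size;

    printf("average group size = %.1f, agg_useful = %s\n",
           rel_rows / grouped_rows, agg_useful ? "true" : "false");
    return 0;
}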

◆ create_resultscan_path()

Path * create_resultscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer
)

Definition at line 2006 of file pathnode.c.

2008{
2009 Path *pathnode = makeNode(Path);
2010
2011 pathnode->pathtype = T_Result;
2012 pathnode->parent = rel;
2013 pathnode->pathtarget = rel->reltarget;
2014 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2015 required_outer);
2016 pathnode->parallel_aware = false;
2017 pathnode->parallel_safe = rel->consider_parallel;
2018 pathnode->parallel_workers = 0;
2019 pathnode->pathkeys = NIL; /* result is always unordered */
2020
2021 cost_resultscan(pathnode, root, rel, pathnode->param_info);
2022
2023 return pathnode;
2024}
void cost_resultscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1762

References RelOptInfo::consider_parallel, cost_resultscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_result_pathlist().

◆ create_samplescan_path()

Path * create_samplescan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer
)

Definition at line 1008 of file pathnode.c.

1009{
1010 Path *pathnode = makeNode(Path);
1011
1012 pathnode->pathtype = T_SampleScan;
1013 pathnode->parent = rel;
1014 pathnode->pathtarget = rel->reltarget;
1015 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1016 required_outer);
1017 pathnode->parallel_aware = false;
1018 pathnode->parallel_safe = rel->consider_parallel;
1019 pathnode->parallel_workers = 0;
1020 pathnode->pathkeys = NIL; /* samplescan has unordered result */
1021
1022 cost_samplescan(pathnode, root, rel, pathnode->param_info);
1023
1024 return pathnode;
1025}
void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:344

References RelOptInfo::consider_parallel, cost_samplescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by reparameterize_path(), and set_tablesample_rel_pathlist().

◆ create_seqscan_path()

Path * create_seqscan_path ( PlannerInfo * root,
RelOptInfo * rel,
Relids  required_outer,
int  parallel_workers
)

Definition at line 983 of file pathnode.c.

985{
986 Path *pathnode = makeNode(Path);
987
988 pathnode->pathtype = T_SeqScan;
989 pathnode->parent = rel;
990 pathnode->pathtarget = rel->reltarget;
991 pathnode->param_info = get_baserel_parampathinfo(root, rel,
992 required_outer);
993 pathnode->parallel_aware = (parallel_workers > 0);
994 pathnode->parallel_safe = rel->consider_parallel;
995 pathnode->parallel_workers = parallel_workers;
996 pathnode->pathkeys = NIL; /* seqscan has unordered result */
997
998 cost_seqscan(pathnode, root, rel, pathnode->param_info);
999
1000 return pathnode;
1001}
void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:269

References RelOptInfo::consider_parallel, cost_seqscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by create_plain_partial_paths(), plan_cluster_use_sort(), reparameterize_path(), and set_plain_rel_pathlist().

◆ create_set_projection_path()

ProjectSetPath * create_set_projection_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * subpath,
PathTarget * target
)

Definition at line 2722 of file pathnode.c.

2726{
2727 ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2728 double tlist_rows;
2729 ListCell *lc;
2730
2731 pathnode->path.pathtype = T_ProjectSet;
2732 pathnode->path.parent = rel;
2733 pathnode->path.pathtarget = target;
2734 /* For now, assume we are above any joins, so no parameterization */
2735 pathnode->path.param_info = NULL;
2736 pathnode->path.parallel_aware = false;
2737 pathnode->path.parallel_safe = rel->consider_parallel &&
2738 subpath->parallel_safe &&
2739 is_parallel_safe(root, (Node *) target->exprs);
2740 pathnode->path.parallel_workers = subpath->parallel_workers;
2741 /* Projection does not change the sort order XXX? */
2742 pathnode->path.pathkeys = subpath->pathkeys;
2743
2744 pathnode->subpath = subpath;
2745
2746 /*
2747 * Estimate number of rows produced by SRFs for each row of input; if
2748 * there's more than one in this node, use the maximum.
2749 */
2750 tlist_rows = 1;
2751 foreach(lc, target->exprs)
2752 {
2753 Node *node = (Node *) lfirst(lc);
2754 double itemrows;
2755
2756 itemrows = expression_returns_set_rows(root, node);
2757 if (tlist_rows < itemrows)
2758 tlist_rows = itemrows;
2759 }
2760
2761 /*
2762 * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2763 * per input row, and half of cpu_tuple_cost for each added output row.
2764 * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
2765 * this estimate later.
2766 */
2767 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2768 pathnode->path.rows = subpath->rows * tlist_rows;
2769 pathnode->path.startup_cost = subpath->startup_cost +
2770 target->cost.startup;
2771 pathnode->path.total_cost = subpath->total_cost +
2772 target->cost.startup +
2773 (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2774 (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2775
2776 return pathnode;
2777}
double expression_returns_set_rows(PlannerInfo *root, Node *clause)
Definition: clauses.c:299
Path * subpath
Definition: pathnodes.h:2416

References RelOptInfo::consider_parallel, PathTarget::cost, cpu_tuple_cost, Path::disabled_nodes, expression_returns_set_rows(), PathTarget::exprs, is_parallel_safe(), lfirst, makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, ProjectSetPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, root, Path::rows, QualCost::startup, Path::startup_cost, subpath(), ProjectSetPath::subpath, and Path::total_cost.

Referenced by adjust_paths_for_srfs().
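
A standalone C sketch of the ProjectSet row and cost estimate above (invented inputs; 0.01 is the default cpu_tuple_cost): tlist_rows is the largest per-input-row fan-out of any set-returning expression, and each added output row is charged half of cpu_tuple_cost.

#include <stdio.h>

int main(void)
{
    double cpu_tuple_cost = 0.01;            /* default GUC value */
    double srf_rows_per_expr[] = {1.0, 10.0, 3.0};
    double tlist_rows = 1.0;
    double sub_rows = 500.0, sub_total = 80.0;
    double target_startup = 0.0, target_per_tuple = 0.002;

    for (int i = 0; i < 3; i++)
        if (srf_rows_per_expr[i] > tlist_rows)
            tlist_rows = srf_rows_per_expr[i];

    double out_rows = sub_rows * tlist_rows;
    double total = sub_total + target_startup +
        (cpu_tuple_cost + target_per_tuple) * sub_rows +
        (out_rows - sub_rows) * cpu_tuple_cost / 2;

    printf("rows=%.0f total_cost=%.2f\n", out_rows, total);
    return 0;
}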

◆ create_setop_path()

SetOpPath * create_setop_path ( PlannerInfo * root,
RelOptInfo * rel,
Path * leftpath,
Path * rightpath,
SetOpCmd  cmd,
SetOpStrategy  strategy,
List * groupList,
double  numGroups,
double  outputRows
)

Definition at line 3403 of file pathnode.c.

3412{
3413 SetOpPath *pathnode = makeNode(SetOpPath);
3414
3415 pathnode->path.pathtype = T_SetOp;
3416 pathnode->path.parent = rel;
3417 pathnode->path.pathtarget = rel->reltarget;
3418 /* For now, assume we are above any joins, so no parameterization */
3419 pathnode->path.param_info = NULL;
3420 pathnode->path.parallel_aware = false;
3421 pathnode->path.parallel_safe = rel->consider_parallel &&
3422 leftpath->parallel_safe && rightpath->parallel_safe;
3423 pathnode->path.parallel_workers =
3424 leftpath->parallel_workers + rightpath->parallel_workers;
3425 /* SetOp preserves the input sort order if in sort mode */
3426 pathnode->path.pathkeys =
3427 (strategy == SETOP_SORTED) ? leftpath->pathkeys : NIL;
3428
3429 pathnode->leftpath = leftpath;
3430 pathnode->rightpath = rightpath;
3431 pathnode->cmd = cmd;
3432 pathnode->strategy = strategy;
3433 pathnode->groupList = groupList;
3434 pathnode->numGroups = numGroups;
3435
3436 /*
3437 * Compute cost estimates. As things stand, we end up with the same total
3438 * cost in this node for sort and hash methods, but different startup
3439 * costs. This could be refined perhaps, but it'll do for now.
3440 */
3441 pathnode->path.disabled_nodes =
3442 leftpath->disabled_nodes + rightpath->disabled_nodes;
3443 if (strategy == SETOP_SORTED)
3444 {
3445 /*
3446 * In sorted mode, we can emit output incrementally. Charge one
3447 * cpu_operator_cost per comparison per input tuple. Like cost_group,
3448 * we assume all columns get compared at most of the tuples.
3449 */
3450 pathnode->path.startup_cost =
3451 leftpath->startup_cost + rightpath->startup_cost;
3452 pathnode->path.total_cost =
3453 leftpath->total_cost + rightpath->total_cost +
3454 cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3455
3456 /*
3457 * Also charge a small amount per extracted tuple. Like cost_sort,
3458 * charge only operator cost not cpu_tuple_cost, since SetOp does no
3459 * qual-checking or projection.
3460 */
3461 pathnode->path.total_cost += cpu_operator_cost * outputRows;
3462 }
3463 else
3464 {
3465 Size hashtablesize;
3466
3467 /*
3468 * In hashed mode, we must read all the input before we can emit
3469 * anything. Also charge comparison costs to represent the cost of
3470 * hash table lookups.
3471 */
3472 pathnode->path.startup_cost =
3473 leftpath->total_cost + rightpath->total_cost +
3474 cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3475 pathnode->path.total_cost = pathnode->path.startup_cost;
3476
3477 /*
3478 * Also charge a small amount per extracted tuple. Like cost_sort,
3479 * charge only operator cost not cpu_tuple_cost, since SetOp does no
3480 * qual-checking or projection.
3481 */
3482 pathnode->path.total_cost += cpu_operator_cost * outputRows;
3483
3484 /*
3485 * Mark the path as disabled if enable_hashagg is off. While this
3486 * isn't exactly a HashAgg node, it seems close enough to justify
3487 * letting that switch control it.
3488 */
3489 if (!enable_hashagg)
3490 pathnode->path.disabled_nodes++;
3491
3492 /*
3493 * Also disable if it doesn't look like the hashtable will fit into
3494 * hash_mem. (Note: reject on equality, to ensure that an estimate of
3495 * SIZE_MAX disables hashing regardless of the hash_mem limit.)
3496 */
3497 hashtablesize = EstimateSetOpHashTableSpace(numGroups,
3498 leftpath->pathtarget->width);
3499 if (hashtablesize >= get_hash_memory_limit())
3500 pathnode->path.disabled_nodes++;
3501 }
3502 pathnode->path.rows = outputRows;
3503
3504 return pathnode;
3505}
size_t Size
Definition: c.h:615
double cpu_operator_cost
Definition: costsize.c:134
bool enable_hashagg
Definition: costsize.c:152
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3621
Size EstimateSetOpHashTableSpace(double nentries, Size tupleWidth)
Definition: nodeSetOp.c:115
@ SETOP_SORTED
Definition: nodes.h:416
Path * rightpath
Definition: pathnodes.h:2564
Cardinality numGroups
Definition: pathnodes.h:2568
Path * leftpath
Definition: pathnodes.h:2563
SetOpCmd cmd
Definition: pathnodes.h:2565
Path path
Definition: pathnodes.h:2562
SetOpStrategy strategy
Definition: pathnodes.h:2566
List * groupList
Definition: pathnodes.h:2567

References SetOpPath::cmd, RelOptInfo::consider_parallel, cpu_operator_cost, Path::disabled_nodes, enable_hashagg, EstimateSetOpHashTableSpace(), get_hash_memory_limit(), SetOpPath::groupList, if(), SetOpPath::leftpath, list_length(), makeNode, NIL, SetOpPath::numGroups, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SetOpPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, SetOpPath::rightpath, Path::rows, SETOP_SORTED, Path::startup_cost, SetOpPath::strategy, and Path::total_cost.

Referenced by generate_nonunion_paths().
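
To make the sorted-versus-hashed tradeoff above concrete, the standalone sketch below (illustrative only, not part of pathnode.c) recomputes both estimates for one made-up pair of inputs. It assumes the stock default cpu_operator_cost of 0.0025 and ignores disabled_nodes and the hash_mem check; the point it demonstrates is the comment above: both strategies end up with the same total cost, but the hashed strategy pays all of it as startup cost.

/* Illustrative sketch of the SetOp cost arithmetic above. */
#include <stdio.h>

int
main(void)
{
    const double cpu_operator_cost = 0.0025;    /* assumed default GUC value */
    double  left_rows = 10000.0, right_rows = 10000.0;  /* made-up inputs */
    double  left_startup = 0.0, right_startup = 0.0;
    double  left_total = 500.0, right_total = 500.0;
    int     n_group_cols = 2;           /* stands in for list_length(groupList) */
    double  output_rows = 5000.0;
    double  compare_cost;
    double  sorted_startup, sorted_total;
    double  hashed_startup, hashed_total;

    compare_cost = cpu_operator_cost * (left_rows + right_rows) * n_group_cols;

    /* sorted mode: output can be emitted incrementally */
    sorted_startup = left_startup + right_startup;
    sorted_total = left_total + right_total + compare_cost +
        cpu_operator_cost * output_rows;

    /* hashed mode: all input must be read before anything is emitted */
    hashed_startup = left_total + right_total + compare_cost;
    hashed_total = hashed_startup + cpu_operator_cost * output_rows;

    printf("sorted: startup=%g total=%g\n", sorted_startup, sorted_total);
    printf("hashed: startup=%g total=%g\n", hashed_startup, hashed_total);
    return 0;
}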

◆ create_sort_path()

SortPath * create_sort_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
List *  pathkeys,
double  limit_tuples 
)

Definition at line 2841 of file pathnode.c.

2846{
2847 SortPath *pathnode = makeNode(SortPath);
2848
2849 pathnode->path.pathtype = T_Sort;
2850 pathnode->path.parent = rel;
2851 /* Sort doesn't project, so use source path's pathtarget */
2852 pathnode->path.pathtarget = subpath->pathtarget;
2853 pathnode->path.param_info = subpath->param_info;
2854 pathnode->path.parallel_aware = false;
2855 pathnode->path.parallel_safe = rel->consider_parallel &&
2856 subpath->parallel_safe;
2857 pathnode->path.parallel_workers = subpath->parallel_workers;
2858 pathnode->path.pathkeys = pathkeys;
2859
2860 pathnode->subpath = subpath;
2861
2862 cost_sort(&pathnode->path, root, pathkeys,
2863 subpath->disabled_nodes,
2864 subpath->total_cost,
2865 subpath->rows,
2866 subpath->pathtarget->width,
2867 0.0, /* XXX comparison_cost shouldn't be 0? */
2868 work_mem, limit_tuples);
2869
2870 return pathnode;
2871}

References RelOptInfo::consider_parallel, cost_sort(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SortPath::path, Path::pathkeys, Path::pathtype, root, subpath(), SortPath::subpath, and work_mem.

Referenced by add_paths_with_pathkeys_for_rel(), build_setop_child_paths(), create_final_unique_paths(), create_one_window_path(), create_ordered_paths(), create_partial_unique_paths(), gather_grouping_paths(), generate_grouped_paths(), generate_nonunion_paths(), generate_union_paths(), generate_useful_gather_paths(), and make_ordered_path().
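
Because create_sort_path() unconditionally puts an explicit Sort on top of subpath, callers normally guard it with a pathkey test and sort only when the existing ordering is not good enough. The helper below is a hypothetical sketch of that pattern (sort_input_if_needed is not a planner function); it assumes compilation inside the PostgreSQL source tree and uses the existing pathkeys_contained_in() test from pathkeys.c.

#include "postgres.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"

/* Sketch: return input_path unchanged if already suitably ordered, else sort it. */
static Path *
sort_input_if_needed(PlannerInfo *root, RelOptInfo *rel, Path *input_path,
                     List *required_pathkeys, double limit_tuples)
{
    if (pathkeys_contained_in(required_pathkeys, input_path->pathkeys))
        return input_path;      /* existing order already satisfies the request */

    return (Path *) create_sort_path(root, rel, input_path,
                                     required_pathkeys, limit_tuples);
}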

◆ create_subqueryscan_path()

SubqueryScanPath * create_subqueryscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
bool  trivial_pathtarget,
List *  pathkeys,
Relids  required_outer 
)

Definition at line 1846 of file pathnode.c.

1849{
1850 SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
1851
1852 pathnode->path.pathtype = T_SubqueryScan;
1853 pathnode->path.parent = rel;
1854 pathnode->path.pathtarget = rel->reltarget;
1855 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1856 required_outer);
1857 pathnode->path.parallel_aware = false;
1858 pathnode->path.parallel_safe = rel->consider_parallel &&
1859 subpath->parallel_safe;
1860 pathnode->path.parallel_workers = subpath->parallel_workers;
1861 pathnode->path.pathkeys = pathkeys;
1862 pathnode->subpath = subpath;
1863
1864 cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
1865 trivial_pathtarget);
1866
1867 return pathnode;
1868}
void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info, bool trivial_pathtarget)
Definition: costsize.c:1431

References RelOptInfo::consider_parallel, cost_subqueryscan(), get_baserel_parampathinfo(), makeNode, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, SubqueryScanPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, subpath(), and SubqueryScanPath::subpath.

Referenced by build_setop_child_paths(), reparameterize_path(), and set_subquery_pathlist().

◆ create_tablefuncscan_path()

Path * create_tablefuncscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Relids  required_outer 
)

Definition at line 1902 of file pathnode.c.

1904{
1905 Path *pathnode = makeNode(Path);
1906
1907 pathnode->pathtype = T_TableFuncScan;
1908 pathnode->parent = rel;
1909 pathnode->pathtarget = rel->reltarget;
1910 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1911 required_outer);
1912 pathnode->parallel_aware = false;
1913 pathnode->parallel_safe = rel->consider_parallel;
1914 pathnode->parallel_workers = 0;
1915 pathnode->pathkeys = NIL; /* result is always unordered */
1916
1917 cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
1918
1919 return pathnode;
1920}
void cost_tablefuncscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1574

References RelOptInfo::consider_parallel, cost_tablefuncscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_tablefunc_pathlist().

◆ create_tidrangescan_path()

TidRangePath * create_tidrangescan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  tidrangequals,
Relids  required_outer 
)

Definition at line 1264 of file pathnode.c.

1266{
1267 TidRangePath *pathnode = makeNode(TidRangePath);
1268
1269 pathnode->path.pathtype = T_TidRangeScan;
1270 pathnode->path.parent = rel;
1271 pathnode->path.pathtarget = rel->reltarget;
1272 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1273 required_outer);
1274 pathnode->path.parallel_aware = false;
1275 pathnode->path.parallel_safe = rel->consider_parallel;
1276 pathnode->path.parallel_workers = 0;
1277 pathnode->path.pathkeys = NIL; /* always unordered */
1278
1279 pathnode->tidrangequals = tidrangequals;
1280
1281 cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1282 pathnode->path.param_info);
1283
1284 return pathnode;
1285}
void cost_tidrangescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidrangequals, ParamPathInfo *param_info)
Definition: costsize.c:1337
List * tidrangequals
Definition: pathnodes.h:2084

References RelOptInfo::consider_parallel, cost_tidrangescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidRangePath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidRangePath::tidrangequals.

Referenced by create_tidscan_paths().

◆ create_tidscan_path()

TidPath * create_tidscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
List *  tidquals,
Relids  required_outer 
)

Definition at line 1235 of file pathnode.c.

1237{
1238 TidPath *pathnode = makeNode(TidPath);
1239
1240 pathnode->path.pathtype = T_TidScan;
1241 pathnode->path.parent = rel;
1242 pathnode->path.pathtarget = rel->reltarget;
1243 pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1244 required_outer);
1245 pathnode->path.parallel_aware = false;
1246 pathnode->path.parallel_safe = rel->consider_parallel;
1247 pathnode->path.parallel_workers = 0;
1248 pathnode->path.pathkeys = NIL; /* always unordered */
1249
1250 pathnode->tidquals = tidquals;
1251
1252 cost_tidscan(&pathnode->path, root, rel, tidquals,
1253 pathnode->path.param_info);
1254
1255 return pathnode;
1256}
void cost_tidscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
Definition: costsize.c:1232
List * tidquals
Definition: pathnodes.h:2072
Path path
Definition: pathnodes.h:2071

References RelOptInfo::consider_parallel, cost_tidscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, TidPath::path, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, root, and TidPath::tidquals.

Referenced by BuildParameterizedTidPaths(), and create_tidscan_paths().

◆ create_unique_path()

UniquePath * create_unique_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
int  numCols,
double  numGroups 
)

Definition at line 2942 of file pathnode.c.

2947{
2948 UniquePath *pathnode = makeNode(UniquePath);
2949
2950 pathnode->path.pathtype = T_Unique;
2951 pathnode->path.parent = rel;
2952 /* Unique doesn't project, so use source path's pathtarget */
2953 pathnode->path.pathtarget = subpath->pathtarget;
2954 pathnode->path.param_info = subpath->param_info;
2955 pathnode->path.parallel_aware = false;
2956 pathnode->path.parallel_safe = rel->consider_parallel &&
2957 subpath->parallel_safe;
2958 pathnode->path.parallel_workers = subpath->parallel_workers;
2959 /* Unique doesn't change the input ordering */
2960 pathnode->path.pathkeys = subpath->pathkeys;
2961
2962 pathnode->subpath = subpath;
2963 pathnode->numkeys = numCols;
2964
2965 /*
2966 * Charge one cpu_operator_cost per comparison per input tuple. We assume
2967 * all columns get compared at most of the tuples. (XXX probably this is
2968 * an overestimate.)
2969 */
2970 pathnode->path.disabled_nodes = subpath->disabled_nodes;
2971 pathnode->path.startup_cost = subpath->startup_cost;
2972 pathnode->path.total_cost = subpath->total_cost +
2973 cpu_operator_cost * subpath->rows * numCols;
2974 pathnode->path.rows = numGroups;
2975
2976 return pathnode;
2977}
Path * subpath
Definition: pathnodes.h:2470

References RelOptInfo::consider_parallel, cpu_operator_cost, Path::disabled_nodes, makeNode, UniquePath::numkeys, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, UniquePath::path, Path::pathkeys, Path::pathtype, Path::rows, Path::startup_cost, subpath(), UniquePath::subpath, and Path::total_cost.

Referenced by create_final_distinct_paths(), create_final_unique_paths(), create_partial_distinct_paths(), create_partial_unique_paths(), and generate_union_paths().
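
A Unique node relies on its input arriving sorted on the key columns, which is why the path preserves the input pathkeys and charges only a per-row comparison cost. The sketch below shows the usual Sort-then-Unique construction; it is a hypothetical helper (make_sorted_unique is not a planner function), assumes compilation inside the PostgreSQL source tree, and passes -1.0 for limit_tuples, i.e. no LIMIT to exploit.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Sketch: build Unique on top of an explicit Sort on the same pathkeys. */
static Path *
make_sorted_unique(PlannerInfo *root, RelOptInfo *rel, Path *input_path,
                   List *unique_pathkeys, double numGroups)
{
    Path   *sorted;

    sorted = (Path *) create_sort_path(root, rel, input_path,
                                       unique_pathkeys, -1.0);
    /* assumes one key column per pathkey */
    return (Path *) create_unique_path(root, rel, sorted,
                                       list_length(unique_pathkeys),
                                       numGroups);
}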

◆ create_valuesscan_path()

Path * create_valuesscan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Relids  required_outer 
)

Definition at line 1928 of file pathnode.c.

1930{
1931 Path *pathnode = makeNode(Path);
1932
1933 pathnode->pathtype = T_ValuesScan;
1934 pathnode->parent = rel;
1935 pathnode->pathtarget = rel->reltarget;
1936 pathnode->param_info = get_baserel_parampathinfo(root, rel,
1937 required_outer);
1938 pathnode->parallel_aware = false;
1939 pathnode->parallel_safe = rel->consider_parallel;
1940 pathnode->parallel_workers = 0;
1941 pathnode->pathkeys = NIL; /* result is always unordered */
1942
1943 cost_valuesscan(pathnode, root, rel, pathnode->param_info);
1944
1945 return pathnode;
1946}
void cost_valuesscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info)
Definition: costsize.c:1631

References RelOptInfo::consider_parallel, cost_valuesscan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_values_pathlist().

◆ create_windowagg_path()

WindowAggPath * create_windowagg_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Path *  subpath,
PathTarget *  target,
List *  windowFuncs,
List *  runCondition,
WindowClause *  winclause,
List *  qual,
bool  topwindow 
)

Definition at line 3330 of file pathnode.c.

3339{
3340 WindowAggPath *pathnode = makeNode(WindowAggPath);
3341
3342 /* qual can only be set for the topwindow */
3343 Assert(qual == NIL || topwindow);
3344
3345 pathnode->path.pathtype = T_WindowAgg;
3346 pathnode->path.parent = rel;
3347 pathnode->path.pathtarget = target;
3348 /* For now, assume we are above any joins, so no parameterization */
3349 pathnode->path.param_info = NULL;
3350 pathnode->path.parallel_aware = false;
3351 pathnode->path.parallel_safe = rel->consider_parallel &&
3352 subpath->parallel_safe;
3353 pathnode->path.parallel_workers = subpath->parallel_workers;
3354 /* WindowAgg preserves the input sort order */
3355 pathnode->path.pathkeys = subpath->pathkeys;
3356
3357 pathnode->subpath = subpath;
3358 pathnode->winclause = winclause;
3359 pathnode->qual = qual;
3360 pathnode->runCondition = runCondition;
3361 pathnode->topwindow = topwindow;
3362
3363 /*
3364 * For costing purposes, assume that there are no redundant partitioning
3365 * or ordering columns; it's not worth the trouble to deal with that
3366 * corner case here. So we just pass the unmodified list lengths to
3367 * cost_windowagg.
3368 */
3369 cost_windowagg(&pathnode->path, root,
3370 windowFuncs,
3371 winclause,
3372 subpath->disabled_nodes,
3373 subpath->startup_cost,
3374 subpath->total_cost,
3375 subpath->rows);
3376
3377 /* add tlist eval cost for each output row */
3378 pathnode->path.startup_cost += target->cost.startup;
3379 pathnode->path.total_cost += target->cost.startup +
3380 target->cost.per_tuple * pathnode->path.rows;
3381
3382 return pathnode;
3383}
void cost_windowagg(Path *path, PlannerInfo *root, List *windowFuncs, WindowClause *winclause, int input_disabled_nodes, Cost input_startup_cost, Cost input_total_cost, double input_tuples)
Definition: costsize.c:3104
List * runCondition
Definition: pathnodes.h:2552
Path * subpath
Definition: pathnodes.h:2549
WindowClause * winclause
Definition: pathnodes.h:2550

References Assert(), RelOptInfo::consider_parallel, PathTarget::cost, cost_windowagg(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, WindowAggPath::path, Path::pathkeys, Path::pathtype, QualCost::per_tuple, WindowAggPath::qual, root, Path::rows, WindowAggPath::runCondition, QualCost::startup, Path::startup_cost, subpath(), WindowAggPath::subpath, WindowAggPath::topwindow, Path::total_cost, and WindowAggPath::winclause.

Referenced by create_one_window_path().

◆ create_worktablescan_path()

Path * create_worktablescan_path ( PlannerInfo *  root,
RelOptInfo *  rel,
Relids  required_outer 
)

Definition at line 2032 of file pathnode.c.

2034{
2035 Path *pathnode = makeNode(Path);
2036
2037 pathnode->pathtype = T_WorkTableScan;
2038 pathnode->parent = rel;
2039 pathnode->pathtarget = rel->reltarget;
2040 pathnode->param_info = get_baserel_parampathinfo(root, rel,
2041 required_outer);
2042 pathnode->parallel_aware = false;
2043 pathnode->parallel_safe = rel->consider_parallel;
2044 pathnode->parallel_workers = 0;
2045 pathnode->pathkeys = NIL; /* result is always unordered */
2046
2047 /* Cost is the same as for a regular CTE scan */
2048 cost_ctescan(pathnode, root, rel, pathnode->param_info);
2049
2050 return pathnode;
2051}

References RelOptInfo::consider_parallel, cost_ctescan(), get_baserel_parampathinfo(), makeNode, NIL, Path::parallel_aware, Path::parallel_safe, Path::parallel_workers, Path::pathkeys, Path::pathtype, RelOptInfo::reltarget, and root.

Referenced by set_worktable_pathlist().

◆ expand_planner_arrays()

void expand_planner_arrays ( PlannerInfo *  root,
int  add_size 
)

Definition at line 177 of file relnode.c.

178{
179 int new_size;
180
181 Assert(add_size > 0);
182
183 new_size = root->simple_rel_array_size + add_size;
184
185 root->simple_rel_array =
186 repalloc0_array(root->simple_rel_array, RelOptInfo *, root->simple_rel_array_size, new_size);
187
188 root->simple_rte_array =
189 repalloc0_array(root->simple_rte_array, RangeTblEntry *, root->simple_rel_array_size, new_size);
190
191 if (root->append_rel_array)
192 root->append_rel_array =
193 repalloc0_array(root->append_rel_array, AppendRelInfo *, root->simple_rel_array_size, new_size);
194 else
195 root->append_rel_array =
196 palloc0_array(AppendRelInfo *, new_size);
197
198 root->simple_rel_array_size = new_size;
199}
#define palloc0_array(type, count)
Definition: fe_memutils.h:77
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
Size add_size(Size s1, Size s2)
Definition: shmem.c:494

References add_size(), Assert(), palloc0_array, repalloc0_array, and root.

Referenced by expand_inherited_rtentry(), and expand_partitioned_rtentry().

◆ fetch_upper_rel()

RelOptInfo * fetch_upper_rel ( PlannerInfo *  root,
UpperRelationKind  kind,
Relids  relids 
)

Definition at line 1581 of file relnode.c.

1582{
1583 RelOptInfo *upperrel;
1584 ListCell *lc;
1585
1586 /*
1587 * For the moment, our indexing data structure is just a List for each
1588 * relation kind. If we ever get so many of one kind that this stops
1589 * working well, we can improve it. No code outside this function should
1590 * assume anything about how to find a particular upperrel.
1591 */
1592
1593 /* If we already made this upperrel for the query, return it */
1594 foreach(lc, root->upper_rels[kind])
1595 {
1596 upperrel = (RelOptInfo *) lfirst(lc);
1597
1598 if (bms_equal(upperrel->relids, relids))
1599 return upperrel;
1600 }
1601
1602 upperrel = makeNode(RelOptInfo);
1603 upperrel->reloptkind = RELOPT_UPPER_REL;
1604 upperrel->relids = bms_copy(relids);
1605
1606 /* cheap startup cost is interesting iff not all tuples to be retrieved */
1607 upperrel->consider_startup = (root->tuple_fraction > 0);
1608 upperrel->consider_param_startup = false;
1609 upperrel->consider_parallel = false; /* might get changed later */
1610 upperrel->reltarget = create_empty_pathtarget();
1611 upperrel->pathlist = NIL;
1612 upperrel->cheapest_startup_path = NULL;
1613 upperrel->cheapest_total_path = NULL;
1614 upperrel->cheapest_parameterized_paths = NIL;
1615
1616 root->upper_rels[kind] = lappend(root->upper_rels[kind], upperrel);
1617
1618 return upperrel;
1619}
@ RELOPT_UPPER_REL
Definition: pathnodes.h:887

References bms_copy(), bms_equal(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, RelOptInfo::consider_parallel, RelOptInfo::consider_param_startup, RelOptInfo::consider_startup, create_empty_pathtarget(), lappend(), lfirst, makeNode, NIL, RelOptInfo::pathlist, RelOptInfo::relids, RELOPT_UPPER_REL, RelOptInfo::reloptkind, RelOptInfo::reltarget, and root.

Referenced by add_rtes_to_flat_rtable(), build_setop_child_paths(), create_distinct_paths(), create_ordered_paths(), create_partial_distinct_paths(), create_partial_grouping_paths(), create_window_paths(), generate_nonunion_paths(), generate_recursion_path(), generate_union_paths(), grouping_planner(), make_grouping_rel(), make_subplan(), preprocess_minmax_aggregates(), set_subquery_pathlist(), set_subquery_size_estimates(), SS_process_ctes(), standard_planner(), and subquery_planner().
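
A typical use is to look up (or lazily create) the upper relation for a planning phase before attaching paths to it; because the RelOptInfo is memoized in root->upper_rels[kind], every call with the same kind and relids returns the same object. The function below is a hypothetical sketch of that pattern (add_path_to_distinct_rel is not a planner function) and assumes the common case of a NULL relid set.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Sketch: fetch the DISTINCT upper rel and attach one candidate path to it. */
static void
add_path_to_distinct_rel(PlannerInfo *root, Path *some_path)
{
    RelOptInfo *distinct_rel;

    /* the same RelOptInfo comes back on every call with these arguments */
    distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);

    add_path(distinct_rel, some_path);
}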

◆ find_base_rel()

◆ find_base_rel_ignore_join()

RelOptInfo * find_base_rel_ignore_join ( PlannerInfo *  root,
int  relid 
)

Definition at line 569 of file relnode.c.

570{
571 /* use an unsigned comparison to prevent negative array element access */
572 if ((uint32) relid < (uint32) root->simple_rel_array_size)
573 {
574 RelOptInfo *rel;
575 RangeTblEntry *rte;
576
577 rel = root->simple_rel_array[relid];
578 if (rel)
579 return rel;
580
581 /*
582 * We could just return NULL here, but for debugging purposes it seems
583 * best to actually verify that the relid is an outer join and not
584 * something weird.
585 */
586 rte = root->simple_rte_array[relid];
587 if (rte && rte->rtekind == RTE_JOIN && rte->jointype != JOIN_INNER)
588 return NULL;
589 }
590
591 elog(ERROR, "no relation entry for relid %d", relid);
592
593 return NULL; /* keep compiler quiet */
594}
JoinType jointype
Definition: parsenodes.h:1182

References elog, ERROR, JOIN_INNER, RangeTblEntry::jointype, root, RTE_JOIN, and RangeTblEntry::rtekind.

Referenced by add_join_clause_to_rels(), create_lateral_join_info(), eager_aggregation_possible_for_relation(), find_appinfos_by_relids(), and remove_join_clause_from_rels().

◆ find_base_rel_noerr()

RelOptInfo * find_base_rel_noerr ( PlannerInfo *  root,
int  relid 
)

Definition at line 551 of file relnode.c.

552{
553 /* use an unsigned comparison to prevent negative array element access */
554 if ((uint32) relid < (uint32) root->simple_rel_array_size)
555 return root->simple_rel_array[relid];
556 return NULL;
557}

References root.

Referenced by all_rows_selectable().

◆ find_childrel_parents()

Relids find_childrel_parents ( PlannerInfo *  root,
RelOptInfo *  rel 
)

Definition at line 1631 of file relnode.c.

1632{
1633 Relids result = NULL;
1634
1635 Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1636 Assert(rel->relid > 0 && rel->relid < root->simple_rel_array_size);
1637
1638 do
1639 {
1640 AppendRelInfo *appinfo = root->append_rel_array[rel->relid];
1641 Index prelid = appinfo->parent_relid;
1642
1643 result = bms_add_member(result, prelid);
1644
1645 /* traverse up to the parent rel, loop if it's also a child rel */
1646 rel = find_base_rel(root, prelid);
1647 } while (rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
1648
1649 Assert(rel->reloptkind == RELOPT_BASEREL);
1650
1651 return result;
1652}
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
unsigned int Index
Definition: c.h:624
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:529
Index parent_relid
Definition: pathnodes.h:3192

References Assert(), bms_add_member(), find_base_rel(), AppendRelInfo::parent_relid, RelOptInfo::relid, RELOPT_BASEREL, RELOPT_OTHER_MEMBER_REL, RelOptInfo::reloptkind, and root.

Referenced by check_index_predicates(), and generate_implied_equalities_for_column().

◆ find_join_rel()

RelOptInfo * find_join_rel ( PlannerInfo *  root,
Relids  relids 
)

Definition at line 642 of file relnode.c.

643{
644 /*
645 * Switch to using hash lookup when list grows "too long". The threshold
646 * is arbitrary and is known only here.
647 */
648 if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
649 build_join_rel_hash(root);
650
651 /*
652 * Use either hashtable lookup or linear search, as appropriate.
653 *
654 * Note: the seemingly redundant hashkey variable is used to avoid taking
655 * the address of relids; unless the compiler is exceedingly smart, doing
656 * so would force relids out of a register and thus probably slow down the
657 * list-search case.
658 */
659 if (root->join_rel_hash)
660 {
661 Relids hashkey = relids;
662 JoinHashEntry *hentry;
663
664 hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
665 &hashkey,
666 HASH_FIND,
667 NULL);
668 if (hentry)
669 return hentry->join_rel;
670 }
671 else
672 {
673 ListCell *l;
674
675 foreach(l, root->join_rel_list)
676 {
677 RelOptInfo *rel = (RelOptInfo *) lfirst(l);
678
679 if (bms_equal(rel->relids, relids))
680 return rel;
681 }
682 }
683
684 return NULL;
685}
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:952
@ HASH_FIND
Definition: hsearch.h:113
static void build_join_rel_hash(PlannerInfo *root)
Definition: relnode.c:601
RelOptInfo * join_rel
Definition: relnode.c:47

References bms_equal(), build_join_rel_hash(), HASH_FIND, hash_search(), JoinHashEntry::join_rel, lfirst, list_length(), RelOptInfo::relids, and root.

Referenced by build_child_join_rel(), build_join_rel(), examine_variable(), find_join_input_rel(), get_matching_part_pairs(), and postgresPlanDirectModify().
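
Callers treat this as a pure lookup keyed on the join's relid set (compared with bms_equal), switching transparently between the list scan and the hash table once the threshold above is crossed. The wrapper below is a hypothetical sketch (existing_joinrel_for is not a planner function); it simplifies by taking just the union of the two input relations' relids, whereas real callers such as build_join_rel() may fold additional relids into the key.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Sketch: return the already-built joinrel for this pair, or NULL. */
static RelOptInfo *
existing_joinrel_for(PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel)
{
    Relids  joinrelids = bms_union(outer_rel->relids, inner_rel->relids);

    return find_join_rel(root, joinrelids);     /* NULL means not built yet */
}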

◆ find_param_path_info()

ParamPathInfo * find_param_path_info ( RelOptInfo *  rel,
Relids  required_outer 
)

Definition at line 2011 of file relnode.c.

2012{
2013 ListCell *lc;
2014
2015 foreach(lc, rel->ppilist)
2016 {
2017 ParamPathInfo *ppi = (ParamPathInfo *) lfirst(lc);
2018
2019 if (bms_equal(ppi->ppi_req_outer, required_outer))
2020 return ppi;
2021 }
2022
2023 return NULL;
2024}
Relids ppi_req_outer
Definition: pathnodes.h:1826

References bms_equal(), lfirst, ParamPathInfo::ppi_req_outer, and RelOptInfo::ppilist.

Referenced by get_appendrel_parampathinfo(), get_baserel_parampathinfo(), get_joinrel_parampathinfo(), and reparameterize_path_by_child().

◆ get_appendrel_parampathinfo()

ParamPathInfo * get_appendrel_parampathinfo ( RelOptInfo *  appendrel,
Relids  required_outer 
)

Definition at line 1978 of file relnode.c.

1979{
1980 ParamPathInfo *ppi;
1981
1982 /* If rel has LATERAL refs, every path for it should account for them */
1983 Assert(bms_is_subset(appendrel->lateral_relids, required_outer));
1984
1985 /* Unparameterized paths have no ParamPathInfo */
1986 if (bms_is_empty(required_outer))
1987 return NULL;
1988
1989 Assert(!bms_overlap(appendrel->relids, required_outer));
1990
1991 /* If we already have a PPI for this parameterization, just return it */
1992 if ((ppi = find_param_path_info(appendrel, required_outer)))
1993 return ppi;
1994
1995 /* Else build the ParamPathInfo */
1996 ppi = makeNode(ParamPathInfo);
1997 ppi->ppi_req_outer = required_outer;
1998 ppi->ppi_rows = 0;
1999 ppi->ppi_clauses = NIL;
2000 ppi->ppi_serials = NULL;
2001 appendrel->ppilist = lappend(appendrel->ppilist, ppi);
2002
2003 return ppi;
2004}
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
ParamPathInfo * find_param_path_info(RelOptInfo *rel, Relids required_outer)
Definition: relnode.c:2011
Cardinality ppi_rows
Definition: pathnodes.h:1827
List * ppi_clauses
Definition: pathnodes.h:1828
Bitmapset * ppi_serials
Definition: pathnodes.h:1829

References Assert(), bms_is_empty, bms_is_subset(), bms_overlap(), find_param_path_info(), lappend(), RelOptInfo::lateral_relids, makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, and RelOptInfo::relids.

Referenced by create_append_path().

◆ get_baserel_parampathinfo()

ParamPathInfo * get_baserel_parampathinfo ( PlannerInfo *  root,
RelOptInfo *  baserel,
Relids  required_outer 
)

Definition at line 1667 of file relnode.c.

1669{
1670 ParamPathInfo *ppi;
1671 Relids joinrelids;
1672 List *pclauses;
1673 List *eqclauses;
1674 Bitmapset *pserials;
1675 double rows;
1676 ListCell *lc;
1677
1678 /* If rel has LATERAL refs, every path for it should account for them */
1679 Assert(bms_is_subset(baserel->lateral_relids, required_outer));
1680
1681 /* Unparameterized paths have no ParamPathInfo */
1682 if (bms_is_empty(required_outer))
1683 return NULL;
1684
1685 Assert(!bms_overlap(baserel->relids, required_outer));
1686
1687 /* If we already have a PPI for this parameterization, just return it */
1688 if ((ppi = find_param_path_info(baserel, required_outer)))
1689 return ppi;
1690
1691 /*
1692 * Identify all joinclauses that are movable to this base rel given this
1693 * parameterization.
1694 */
1695 joinrelids = bms_union(baserel->relids, required_outer);
1696 pclauses = NIL;
1697 foreach(lc, baserel->joininfo)
1698 {
1699 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1700
1701 if (join_clause_is_movable_into(rinfo,
1702 baserel->relids,
1703 joinrelids))
1704 pclauses = lappend(pclauses, rinfo);
1705 }
1706
1707 /*
1708 * Add in joinclauses generated by EquivalenceClasses, too. (These
1709 * necessarily satisfy join_clause_is_movable_into; but in assert-enabled
1710 * builds, let's verify that.)
1711 */
1712 eqclauses = generate_join_implied_equalities(root,
1713 joinrelids,
1714 required_outer,
1715 baserel,
1716 NULL);
1717#ifdef USE_ASSERT_CHECKING
1718 foreach(lc, eqclauses)
1719 {
1720 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1721
1722 Assert(join_clause_is_movable_into(rinfo,
1723 baserel->relids,
1724 joinrelids));
1725 }
1726#endif
1727 pclauses = list_concat(pclauses, eqclauses);
1728
1729 /* Compute set of serial numbers of the enforced clauses */
1730 pserials = NULL;
1731 foreach(lc, pclauses)
1732 {
1733 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1734
1735 pserials = bms_add_member(pserials, rinfo->rinfo_serial);
1736 }
1737
1738 /* Estimate the number of rows returned by the parameterized scan */
1739 rows = get_parameterized_baserel_size(root, baserel, pclauses);
1740
1741 /* And now we can build the ParamPathInfo */
1742 ppi = makeNode(ParamPathInfo);
1743 ppi->ppi_req_outer = required_outer;
1744 ppi->ppi_rows = rows;
1745 ppi->ppi_clauses = pclauses;
1746 ppi->ppi_serials = pserials;
1747 baserel->ppilist = lappend(baserel->ppilist, ppi);
1748
1749 return ppi;
1750}
double get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel, List *param_clauses)
Definition: costsize.c:5388
List * generate_join_implied_equalities(PlannerInfo *root, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel, SpecialJoinInfo *sjinfo)
Definition: equivclass.c:1550
bool join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer)
Definition: restrictinfo.c:661

References Assert(), bms_add_member(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), get_parameterized_baserel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, RestrictInfo::rinfo_serial, and root.

Referenced by create_append_path(), create_bitmap_and_path(), create_bitmap_heap_path(), create_bitmap_or_path(), create_ctescan_path(), create_foreignscan_path(), create_functionscan_path(), create_gather_merge_path(), create_gather_path(), create_index_path(), create_namedtuplestorescan_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), create_tablefuncscan_path(), create_tidrangescan_path(), create_tidscan_path(), create_valuesscan_path(), create_worktablescan_path(), postgresGetForeignPaths(), and reparameterize_path().
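
The resulting ParamPathInfo is what lets a parameterized scan look cheaper on the row side: cost functions use ppi_rows, which already reflects the selectivity of the enforced join clauses, instead of the relation's unparameterized estimate. The function below is a hedged sketch of that convention (scan_row_estimate is not a planner function; the real logic lives in the cost_*scan functions in costsize.c).

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Sketch: choose the row estimate the way the cost_*scan functions do. */
static double
scan_row_estimate(PlannerInfo *root, RelOptInfo *baserel, Relids required_outer)
{
    ParamPathInfo *param_info =
        get_baserel_parampathinfo(root, baserel, required_outer);

    if (param_info)
        return param_info->ppi_rows;    /* rows after enforcing pushed-down clauses */
    return baserel->rows;               /* unparameterized estimate */
}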

◆ get_joinrel_parampathinfo()

ParamPathInfo * get_joinrel_parampathinfo ( PlannerInfo *  root,
RelOptInfo *  joinrel,
Path *  outer_path,
Path *  inner_path,
SpecialJoinInfo *  sjinfo,
Relids  required_outer,
List **  restrict_clauses 
)

Definition at line 1781 of file relnode.c.

1787{
1788 ParamPathInfo *ppi;
1789 Relids join_and_req;
1790 Relids outer_and_req;
1791 Relids inner_and_req;
1792 List *pclauses;
1793 List *eclauses;
1794 List *dropped_ecs;
1795 double rows;
1796 ListCell *lc;
1797
1798 /* If rel has LATERAL refs, every path for it should account for them */
1799 Assert(bms_is_subset(joinrel->lateral_relids, required_outer));
1800
1801 /* Unparameterized paths have no ParamPathInfo or extra join clauses */
1802 if (bms_is_empty(required_outer))
1803 return NULL;
1804
1805 Assert(!bms_overlap(joinrel->relids, required_outer));
1806
1807 /*
1808 * Identify all joinclauses that are movable to this join rel given this
1809 * parameterization. These are the clauses that are movable into this
1810 * join, but not movable into either input path. Treat an unparameterized
1811 * input path as not accepting parameterized clauses (because it won't,
1812 * per the shortcut exit above), even though the joinclause movement rules
1813 * might allow the same clauses to be moved into a parameterized path for
1814 * that rel.
1815 */
1816 join_and_req = bms_union(joinrel->relids, required_outer);
1817 if (outer_path->param_info)
1818 outer_and_req = bms_union(outer_path->parent->relids,
1819 PATH_REQ_OUTER(outer_path));
1820 else
1821 outer_and_req = NULL; /* outer path does not accept parameters */
1822 if (inner_path->param_info)
1823 inner_and_req = bms_union(inner_path->parent->relids,
1824 PATH_REQ_OUTER(inner_path));
1825 else
1826 inner_and_req = NULL; /* inner path does not accept parameters */
1827
1828 pclauses = NIL;
1829 foreach(lc, joinrel->joininfo)
1830 {
1831 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1832
1833 if (join_clause_is_movable_into(rinfo,
1834 joinrel->relids,
1835 join_and_req) &&
1836 !join_clause_is_movable_into(rinfo,
1837 outer_path->parent->relids,
1838 outer_and_req) &&
1839 !join_clause_is_movable_into(rinfo,
1840 inner_path->parent->relids,
1841 inner_and_req))
1842 pclauses = lappend(pclauses, rinfo);
1843 }
1844
1845 /* Consider joinclauses generated by EquivalenceClasses, too */
1846 eclauses = generate_join_implied_equalities(root,
1847 join_and_req,
1848 required_outer,
1849 joinrel,
1850 NULL);
1851 /* We only want ones that aren't movable to lower levels */
1852 dropped_ecs = NIL;
1853 foreach(lc, eclauses)
1854 {
1855 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1856
1857 Assert(join_clause_is_movable_into(rinfo,
1858 joinrel->relids,
1859 join_and_req));
1860 if (join_clause_is_movable_into(rinfo,
1861 outer_path->parent->relids,
1862 outer_and_req))
1863 continue; /* drop if movable into LHS */
1864 if (join_clause_is_movable_into(rinfo,
1865 inner_path->parent->relids,
1866 inner_and_req))
1867 {
1868 /* drop if movable into RHS, but remember EC for use below */
1869 Assert(rinfo->left_ec == rinfo->right_ec);
1870 dropped_ecs = lappend(dropped_ecs, rinfo->left_ec);
1871 continue;
1872 }
1873 pclauses = lappend(pclauses, rinfo);
1874 }
1875
1876 /*
1877 * EquivalenceClasses are harder to deal with than we could wish, because
1878 * of the fact that a given EC can generate different clauses depending on
1879 * context. Suppose we have an EC {X.X, Y.Y, Z.Z} where X and Y are the
1880 * LHS and RHS of the current join and Z is in required_outer, and further
1881 * suppose that the inner_path is parameterized by both X and Z. The code
1882 * above will have produced either Z.Z = X.X or Z.Z = Y.Y from that EC,
1883 * and in the latter case will have discarded it as being movable into the
1884 * RHS. However, the EC machinery might have produced either Y.Y = X.X or
1885 * Y.Y = Z.Z as the EC enforcement clause within the inner_path; it will
1886 * not have produced both, and we can't readily tell from here which one
1887 * it did pick. If we add no clause to this join, we'll end up with
1888 * insufficient enforcement of the EC; either Z.Z or X.X will fail to be
1889 * constrained to be equal to the other members of the EC. (When we come
1890 * to join Z to this X/Y path, we will certainly drop whichever EC clause
1891 * is generated at that join, so this omission won't get fixed later.)
1892 *
1893 * To handle this, for each EC we discarded such a clause from, try to
1894 * generate a clause connecting the required_outer rels to the join's LHS
1895 * ("Z.Z = X.X" in the terms of the above example). If successful, and if
1896 * the clause can't be moved to the LHS, add it to the current join's
1897 * restriction clauses. (If an EC cannot generate such a clause then it
1898 * has nothing that needs to be enforced here, while if the clause can be
1899 * moved into the LHS then it should have been enforced within that path.)
1900 *
1901 * Note that we don't need similar processing for ECs whose clause was
1902 * considered to be movable into the LHS, because the LHS can't refer to
1903 * the RHS so there is no comparable ambiguity about what it might
1904 * actually be enforcing internally.
1905 */
1906 if (dropped_ecs)
1907 {
1908 Relids real_outer_and_req;
1909
1910 real_outer_and_req = bms_union(outer_path->parent->relids,
1911 required_outer);
1912 eclauses =
1913 generate_join_implied_equalities_for_ecs(root,
1914 dropped_ecs,
1915 real_outer_and_req,
1916 required_outer,
1917 outer_path->parent);
1918 foreach(lc, eclauses)
1919 {
1920 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
1921
1922 Assert(join_clause_is_movable_into(rinfo,
1923 outer_path->parent->relids,
1924 real_outer_and_req));
1925 if (!join_clause_is_movable_into(rinfo,
1926 outer_path->parent->relids,
1927 outer_and_req))
1928 pclauses = lappend(pclauses, rinfo);
1929 }
1930 }
1931
1932 /*
1933 * Now, attach the identified moved-down clauses to the caller's
1934 * restrict_clauses list. By using list_concat in this order, we leave
1935 * the original list structure of restrict_clauses undamaged.
1936 */
1937 *restrict_clauses = list_concat(pclauses, *restrict_clauses);
1938
1939 /* If we already have a PPI for this parameterization, just return it */
1940 if ((ppi = find_param_path_info(joinrel, required_outer)))
1941 return ppi;
1942
1943 /* Estimate the number of rows returned by the parameterized join */
1944 rows = get_parameterized_joinrel_size(root, joinrel,
1945 outer_path,
1946 inner_path,
1947 sjinfo,
1948 *restrict_clauses);
1949
1950 /*
1951 * And now we can build the ParamPathInfo. No point in saving the
1952 * input-pair-dependent clause list, though.
1953 *
1954 * Note: in GEQO mode, we'll be called in a temporary memory context, but
1955 * the joinrel structure is there too, so no problem.
1956 */
1957 ppi = makeNode(ParamPathInfo);
1958 ppi->ppi_req_outer = required_outer;
1959 ppi->ppi_rows = rows;
1960 ppi->ppi_clauses = NIL;
1961 ppi->ppi_serials = NULL;
1962 joinrel->ppilist = lappend(joinrel->ppilist, ppi);
1963
1964 return ppi;
1965}
double get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, Path *outer_path, Path *inner_path, SpecialJoinInfo *sjinfo, List *restrict_clauses)
Definition: costsize.c:5469
List * generate_join_implied_equalities_for_ecs(PlannerInfo *root, List *eclasses, Relids join_relids, Relids outer_relids, RelOptInfo *inner_rel)
Definition: equivclass.c:1650

References Assert(), bms_is_empty, bms_is_subset(), bms_overlap(), bms_union(), find_param_path_info(), generate_join_implied_equalities(), generate_join_implied_equalities_for_ecs(), get_parameterized_joinrel_size(), join_clause_is_movable_into(), RelOptInfo::joininfo, lappend(), RelOptInfo::lateral_relids, lfirst, list_concat(), makeNode, NIL, PATH_REQ_OUTER, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, RelOptInfo::relids, and root.

Referenced by create_hashjoin_path(), create_mergejoin_path(), and create_nestloop_path().
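
Join path constructors call this while assembling the path; the last argument is passed by address because the function may prepend moved-down clauses to the caller's clause list, and that (possibly longer) list is what the join path then stores as joinrestrictinfo. The function below is a hedged sketch of that calling convention (attach_join_param_info is not a planner function), modelled on what create_nestloop_path() and the other join path constructors do.

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Sketch: set a join path's param_info and pick up any moved-down clauses. */
static void
attach_join_param_info(PlannerInfo *root, RelOptInfo *joinrel,
                       JoinPath *jpath, SpecialJoinInfo *sjinfo,
                       Relids required_outer, List *restrict_clauses)
{
    jpath->path.param_info =
        get_joinrel_parampathinfo(root, joinrel,
                                  jpath->outerjoinpath,
                                  jpath->innerjoinpath,
                                  sjinfo,
                                  required_outer,
                                  &restrict_clauses);
    jpath->joinrestrictinfo = restrict_clauses;
}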

◆ get_param_path_clause_serials()

Bitmapset * get_param_path_clause_serials ( Path *  path )

Definition at line 2032 of file relnode.c.

2033{
2034 if (path->param_info == NULL)
2035 return NULL; /* not parameterized */
2036
2037 /*
2038 * We don't currently support parameterized MergeAppend paths, as
2039 * explained in the comments for generate_orderedappend_paths.
2040 */
2041 Assert(!IsA(path, MergeAppendPath));
2042
2043 if (IsA(path, NestPath) ||
2044 IsA(path, MergePath) ||
2045 IsA(path, HashPath))
2046 {
2047 /*
2048 * For a join path, combine clauses enforced within either input path
2049 * with those enforced as joinrestrictinfo in this path. Note that
2050 * joinrestrictinfo may include some non-pushed-down clauses, but for
2051 * current purposes it's okay if we include those in the result. (To
2052 * be more careful, we could check for clause_relids overlapping the
2053 * path parameterization, but it's not worth the cycles for now.)
2054 */
2055 JoinPath *jpath = (JoinPath *) path;
2056 Bitmapset *pserials;
2057 ListCell *lc;
2058
2059 pserials = NULL;
2060 pserials = bms_add_members(pserials,
2061 get_param_path_clause_serials(jpath->outerjoinpath));
2062 pserials = bms_add_members(pserials,
2063 get_param_path_clause_serials(jpath->innerjoinpath));
2064 foreach(lc, jpath->joinrestrictinfo)
2065 {
2066 RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2067
2068 pserials = bms_add_member(pserials, rinfo->rinfo_serial);
2069 }
2070 return pserials;
2071 }
2072 else if (IsA(path, AppendPath))
2073 {
2074 /*
2075 * For an appendrel, take the intersection of the sets of clauses
2076 * enforced in each input path.
2077 */
2078 AppendPath *apath = (AppendPath *) path;
2079 Bitmapset *pserials;
2080 ListCell *lc;
2081
2082 pserials = NULL;
2083 foreach(lc, apath->subpaths)
2084 {
2085 Path *subpath = (Path *) lfirst(lc);
2086 Bitmapset *subserials;
2087
2088 subserials = get_param_path_clause_serials(subpath);
2089 if (lc == list_head(apath->subpaths))
2090 pserials = bms_copy(subserials);
2091 else
2092 pserials = bms_int_members(pserials, subserials);
2093 }
2094 return pserials;
2095 }
2096 else
2097 {
2098 /*
2099 * Otherwise, it's a baserel path and we can use the
2100 * previously-computed set of serial numbers.
2101 */
2102 return path->param_info->ppi_serials;
2103 }
2104}
Bitmapset * bms_int_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1109
static ListCell * list_head(const List *l)
Definition: pg_list.h:128

References Assert(), bms_add_member(), bms_add_members(), bms_copy(), bms_int_members(), get_param_path_clause_serials(), JoinPath::innerjoinpath, IsA, JoinPath::joinrestrictinfo, lfirst, list_head(), JoinPath::outerjoinpath, RestrictInfo::rinfo_serial, subpath(), and AppendPath::subpaths.

Referenced by create_nestloop_path(), and get_param_path_clause_serials().

◆ min_join_parameterization()

Relids min_join_parameterization ( PlannerInfo *  root,
Relids  joinrelids,
RelOptInfo *  outer_rel,
RelOptInfo *  inner_rel 
)

Definition at line 1145 of file relnode.c.

1149{
1150 Relids result;
1151
1152 /*
1153 * Basically we just need the union of the inputs' lateral_relids, less
1154 * whatever is already in the join.
1155 *
1156 * It's not immediately obvious that this is a valid way to compute the
1157 * result, because it might seem that we're ignoring possible lateral refs
1158 * of PlaceHolderVars that are due to be computed at the join but not in
1159 * either input. However, because create_lateral_join_info() already
1160 * charged all such PHV refs to each member baserel of the join, they'll
1161 * be accounted for already in the inputs' lateral_relids. Likewise, we
1162 * do not need to worry about doing transitive closure here, because that
1163 * was already accounted for in the original baserel lateral_relids.
1164 */
1165 result = bms_union(outer_rel->lateral_relids, inner_rel->lateral_relids);
1166 result = bms_del_members(result, joinrelids);
1167 return result;
1168}

References bms_del_members(), bms_union(), and RelOptInfo::lateral_relids.

Referenced by build_join_rel(), and join_is_legal().

◆ path_is_reparameterizable_by_child()

bool path_is_reparameterizable_by_child ( Path *  path,
RelOptInfo *  child_rel 
)

Definition at line 4315 of file pathnode.c.

4316{
4317#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \
4318do { \
4319 if (!path_is_reparameterizable_by_child(path, child_rel)) \
4320 return false; \
4321} while(0)
4322
4323#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \
4324do { \
4325 if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \
4326 return false; \
4327} while(0)
4328
4329 /*
4330 * If the path is not parameterized by the parent of the given relation,
4331 * it doesn't need reparameterization.
4332 */
4333 if (!path->param_info ||
4334 !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4335 return true;
4336
4337 /*
4338 * Check that the path type is one that reparameterize_path_by_child() can
4339 * handle, and recursively check subpaths.
4340 */
4341 switch (nodeTag(path))
4342 {
4343 case T_Path:
4344 case T_IndexPath:
4345 break;
4346
4347 case T_BitmapHeapPath:
4348 {
4349 BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4350
4352 }
4353 break;
4354
4355 case T_BitmapAndPath:
4356 {
4357 BitmapAndPath *bapath = (BitmapAndPath *) path;
4358
4360 }
4361 break;
4362
4363 case T_BitmapOrPath:
4364 {
4365 BitmapOrPath *bopath = (BitmapOrPath *) path;
4366
4368 }
4369 break;
4370
4371 case T_ForeignPath:
4372 {
4373 ForeignPath *fpath = (ForeignPath *) path;
4374
4375 if (fpath->fdw_outerpath)
4377 }
4378 break;
4379
4380 case T_CustomPath:
4381 {
4382 CustomPath *cpath = (CustomPath *) path;
4383
4385 }
4386 break;
4387
4388 case T_NestPath:
4389 case T_MergePath:
4390 case T_HashPath:
4391 {
4392 JoinPath *jpath = (JoinPath *) path;
4393
4396 }
4397 break;
4398
4399 case T_AppendPath:
4400 {
4401 AppendPath *apath = (AppendPath *) path;
4402
4404 }
4405 break;
4406
4407 case T_MaterialPath:
4408 {
4409 MaterialPath *mpath = (MaterialPath *) path;
4410
4412 }
4413 break;
4414
4415 case T_MemoizePath:
4416 {
4417 MemoizePath *mpath = (MemoizePath *) path;
4418
4420 }
4421 break;
4422
4423 case T_GatherPath:
4424 {
4425 GatherPath *gpath = (GatherPath *) path;
4426
4428 }
4429 break;
4430
4431 default:
4432 /* We don't know how to reparameterize this path. */
4433 return false;
4434 }
4435
4436 return true;
4437}
#define nodeTag(nodeptr)
Definition: nodes.h:139
#define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist)
#define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path)
List * custom_paths
Definition: pathnodes.h:2156

References BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_overlap(), CustomPath::custom_paths, ForeignPath::fdw_outerpath, JoinPath::innerjoinpath, nodeTag, JoinPath::outerjoinpath, PATH_REQ_OUTER, REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE, REJECT_IF_PATH_NOT_REPARAMETERIZABLE, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, and RelOptInfo::top_parent_relids.

Referenced by pathlist_is_reparameterizable_by_child(), try_nestloop_path(), and try_partial_nestloop_path().
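
This is deliberately a cheap pre-check: during join search the planner only wants to know whether a parameterized path could be translated for a child relation, while the actual translation by reparameterize_path_by_child() is postponed until plan creation. The wrapper below is a hypothetical sketch of the guard used by the nestloop path builders (nestloop_inner_path_is_usable is not a planner function).

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Sketch: reject an inner path whose parameterization can't be translated later. */
static bool
nestloop_inner_path_is_usable(Path *inner_path, RelOptInfo *outer_child_rel)
{
    if (!path_is_reparameterizable_by_child(inner_path, outer_child_rel))
        return false;           /* would fail when the plan is created */
    return true;
}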

◆ reparameterize_path()

Path * reparameterize_path ( PlannerInfo *  root,
Path *  path,
Relids  required_outer,
double  loop_count 
)

Definition at line 3853 of file pathnode.c.

3856{
3857 RelOptInfo *rel = path->parent;
3858
3859 /* Can only increase, not decrease, path's parameterization */
3860 if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3861 return NULL;
3862 switch (path->pathtype)
3863 {
3864 case T_SeqScan:
3865 return create_seqscan_path(root, rel, required_outer, 0);
3866 case T_SampleScan:
3867 return (Path *) create_samplescan_path(root, rel, required_outer);
3868 case T_IndexScan:
3869 case T_IndexOnlyScan:
3870 {
3871 IndexPath *ipath = (IndexPath *) path;
3872 IndexPath *newpath = makeNode(IndexPath);
3873
3874 /*
3875 * We can't use create_index_path directly, and would not want
3876 * to because it would re-compute the indexqual conditions
3877 * which is wasted effort. Instead we hack things a bit:
3878 * flat-copy the path node, revise its param_info, and redo
3879 * the cost estimate.
3880 */
3881 memcpy(newpath, ipath, sizeof(IndexPath));
3882 newpath->path.param_info =
3883 get_baserel_parampathinfo(root, rel, required_outer);
3884 cost_index(newpath, root, loop_count, false);
3885 return (Path *) newpath;
3886 }
3887 case T_BitmapHeapScan:
3888 {
3889 BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3890
3891 return (Path *) create_bitmap_heap_path(root,
3892 rel,
3893 bpath->bitmapqual,
3894 required_outer,
3895 loop_count, 0);
3896 }
3897 case T_SubqueryScan:
3898 {
3899 SubqueryScanPath *spath = (SubqueryScanPath *) path;
3900 Path *subpath = spath->subpath;
3901 bool trivial_pathtarget;
3902
3903 /*
3904 * If existing node has zero extra cost, we must have decided
3905 * its target is trivial. (The converse is not true, because
3906 * it might have a trivial target but quals to enforce; but in
3907 * that case the new node will too, so it doesn't matter
3908 * whether we get the right answer here.)
3909 */
3910 trivial_pathtarget =
3911 (subpath->total_cost == spath->path.total_cost);
3912
3913 return (Path *) create_subqueryscan_path(root,
3914 rel,
3915 subpath,
3916 trivial_pathtarget,
3917 spath->path.pathkeys,
3918 required_outer);
3919 }
3920 case T_Result:
3921 /* Supported only for RTE_RESULT scan paths */
3922 if (IsA(path, Path))
3923 return create_resultscan_path(root, rel, required_outer);
3924 break;
3925 case T_Append:
3926 {
3927 AppendPath *apath = (AppendPath *) path;
3928 List *childpaths = NIL;
3929 List *partialpaths = NIL;
3930 int i;
3931 ListCell *lc;
3932
3933 /* Reparameterize the children */
3934 i = 0;
3935 foreach(lc, apath->subpaths)
3936 {
3937 Path *spath = (Path *) lfirst(lc);
3938
3939 spath = reparameterize_path(root, spath,
3940 required_outer,
3941 loop_count);
3942 if (spath == NULL)
3943 return NULL;
3944 /* We have to re-split the regular and partial paths */
3945 if (i < apath->first_partial_path)
3946 childpaths = lappend(childpaths, spath);
3947 else
3948 partialpaths = lappend(partialpaths, spath);
3949 i++;
3950 }
3951 return (Path *)
3952 create_append_path(root, rel, childpaths, partialpaths,
3953 apath->path.pathkeys, required_outer,
3954 apath->path.parallel_workers,
3955 apath->path.parallel_aware,
3956 -1);
3957 }
3958 case T_Material:
3959 {
3960 MaterialPath *mpath = (MaterialPath *) path;
3961 Path *spath = mpath->subpath;
3962
3963 spath = reparameterize_path(root, spath,
3964 required_outer,
3965 loop_count);
3966 if (spath == NULL)
3967 return NULL;
3968 return (Path *) create_material_path(rel, spath);
3969 }
3970 case T_Memoize:
3971 {
3972 MemoizePath *mpath = (MemoizePath *) path;
3973 Path *spath = mpath->subpath;
3974
3975 spath = reparameterize_path(root, spath,
3976 required_outer,
3977 loop_count);
3978 if (spath == NULL)
3979 return NULL;
3980 return (Path *) create_memoize_path(root, rel,
3981 spath,
3982 mpath->param_exprs,
3983 mpath->hash_operators,
3984 mpath->singlerow,
3985 mpath->binary_mode,
3986 mpath->est_calls);
3987 }
3988 default:
3989 break;
3990 }
3991 return NULL;
3992}
int i
Definition: isn.c:77
MemoizePath * create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, bool singlerow, bool binary_mode, Cardinality est_calls)
Definition: pathnode.c:1690
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:983
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1300
SubqueryScanPath * create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, bool trivial_pathtarget, List *pathkeys, Relids required_outer)
Definition: pathnode.c:1846
BitmapHeapPath * create_bitmap_heap_path(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual, Relids required_outer, double loop_count, int parallel_degree)
Definition: pathnode.c:1098
Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:1008
MaterialPath * create_material_path(RelOptInfo *rel, Path *subpath)
Definition: pathnode.c:1657
Path * create_resultscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
Definition: pathnode.c:2006
Path * reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count)
Definition: pathnode.c:3853

References MemoizePath::binary_mode, BitmapHeapPath::bitmapqual, bms_is_subset(), cost_index(), create_append_path(), create_bitmap_heap_path(), create_material_path(), create_memoize_path(), create_resultscan_path(), create_samplescan_path(), create_seqscan_path(), create_subqueryscan_path(), MemoizePath::est_calls, get_baserel_parampathinfo(), MemoizePath::hash_operators, i, IsA, lappend(), lfirst, makeNode, NIL, Path::parallel_aware, Path::parallel_workers, MemoizePath::param_exprs, IndexPath::path, SubqueryScanPath::path, AppendPath::path, PATH_REQ_OUTER, Path::pathkeys, Path::pathtype, reparameterize_path(), root, MemoizePath::singlerow, subpath(), SubqueryScanPath::subpath, MaterialPath::subpath, MemoizePath::subpath, AppendPath::subpaths, and Path::total_cost.

Referenced by get_cheapest_parameterized_child_path(), and reparameterize_path().
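
The usual consumer pattern is the one in get_cheapest_parameterized_child_path(): when no existing path carries exactly the required parameterization, take paths with a weaker (subset) parameterization and ask reparameterize_path() to strengthen them, skipping any path type it cannot handle. The function below is a simplified, hypothetical sketch of that fallback loop (cheapest_path_with_param is not a planner function).

#include "postgres.h"
#include "optimizer/pathnode.h"

/* Sketch: cheapest child path that can be given the required parameterization. */
static Path *
cheapest_path_with_param(PlannerInfo *root, RelOptInfo *child_rel,
                         Relids required_outer, double loop_count)
{
    Path       *best = NULL;
    ListCell   *lc;

    foreach(lc, child_rel->pathlist)
    {
        Path       *path = (Path *) lfirst(lc);

        /* returns NULL if this path cannot be adapted to required_outer */
        path = reparameterize_path(root, path, required_outer, loop_count);
        if (path == NULL)
            continue;
        if (best == NULL || compare_path_costs(path, best, TOTAL_COST) < 0)
            best = path;
    }
    return best;
}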

◆ reparameterize_path_by_child()

Path * reparameterize_path_by_child ( PlannerInfo *  root,
Path *  path,
RelOptInfo *  child_rel 
)

Definition at line 4019 of file pathnode.c.

4021{
4022 Path *new_path;
4023 ParamPathInfo *new_ppi;
4024 ParamPathInfo *old_ppi;
4025 Relids required_outer;
4026
4027#define ADJUST_CHILD_ATTRS(node) \
4028 ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \
4029 (Node *) (node), \
4030 child_rel, \
4031 child_rel->top_parent))
4032
4033#define REPARAMETERIZE_CHILD_PATH(path) \
4034do { \
4035 (path) = reparameterize_path_by_child(root, (path), child_rel); \
4036 if ((path) == NULL) \
4037 return NULL; \
4038} while(0)
4039
4040#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4041do { \
4042 if ((pathlist) != NIL) \
4043 { \
4044 (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4045 child_rel); \
4046 if ((pathlist) == NIL) \
4047 return NULL; \
4048 } \
4049} while(0)
4050
4051 /*
4052 * If the path is not parameterized by the parent of the given relation,
4053 * it doesn't need reparameterization.
4054 */
4055 if (!path->param_info ||
4056 !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4057 return path;
4058
4059 /*
4060 * If possible, reparameterize the given path.
4061 *
4062 * This function is currently only applied to the inner side of a nestloop
4063 * join that is being partitioned by the partitionwise-join code. Hence,
4064 * we need only support path types that plausibly arise in that context.
4065 * (In particular, supporting sorted path types would be a waste of code
4066 * and cycles: even if we translated them here, they'd just lose in
4067 * subsequent cost comparisons.) If we do see an unsupported path type,
4068 * that just means we won't be able to generate a partitionwise-join plan
4069 * using that path type.
4070 */
4071 switch (nodeTag(path))
4072 {
4073 case T_Path:
4074 new_path = path;
4075 ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo);
4076 if (path->pathtype == T_SampleScan)
4077 {
4078 Index scan_relid = path->parent->relid;
4079 RangeTblEntry *rte;
4080
4081 /* it should be a base rel with a tablesample clause... */
4082 Assert(scan_relid > 0);
4083 rte = planner_rt_fetch(scan_relid, root);
4084 Assert(rte->rtekind == RTE_RELATION);
4085 Assert(rte->tablesample != NULL);
4086
4088 }
4089 break;
4090
4091 case T_IndexPath:
4092 {
4093 IndexPath *ipath = (IndexPath *) path;
4094
4097 new_path = (Path *) ipath;
4098 }
4099 break;
4100
4101 case T_BitmapHeapPath:
4102 {
4103 BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4104
4105 ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo);
4107 new_path = (Path *) bhpath;
4108 }
4109 break;
4110
4111 case T_BitmapAndPath:
4112 {
4113 BitmapAndPath *bapath = (BitmapAndPath *) path;
4114
4116 new_path = (Path *) bapath;
4117 }
4118 break;
4119
4120 case T_BitmapOrPath:
4121 {
4122 BitmapOrPath *bopath = (BitmapOrPath *) path;
4123
4125 new_path = (Path *) bopath;
4126 }
4127 break;
4128
4129 case T_ForeignPath:
4130 {
4131 ForeignPath *fpath = (ForeignPath *) path;
4133
4134 ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo);
4135 if (fpath->fdw_outerpath)
4137 if (fpath->fdw_restrictinfo)
4139
4140 /* Hand over to FDW if needed. */
4141 rfpc_func =
4142 path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4143 if (rfpc_func)
4144 fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4145 child_rel);
4146 new_path = (Path *) fpath;
4147 }
4148 break;
4149
4150 case T_CustomPath:
4151 {
4152 CustomPath *cpath = (CustomPath *) path;
4153
4154 ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo);
4156 if (cpath->custom_restrictinfo)
4158 if (cpath->methods &&
4160 cpath->custom_private =
4162 cpath->custom_private,
4163 child_rel);
4164 new_path = (Path *) cpath;
4165 }
4166 break;
4167
4168 case T_NestPath:
4169 {
4170 NestPath *npath = (NestPath *) path;
4171 JoinPath *jpath = (JoinPath *) npath;
4172
4176 new_path = (Path *) npath;
4177 }
4178 break;
4179
4180 case T_MergePath:
4181 {
4182 MergePath *mpath = (MergePath *) path;
4183 JoinPath *jpath = (JoinPath *) mpath;
4184
4189 new_path = (Path *) mpath;
4190 }
4191 break;
4192
4193 case T_HashPath:
4194 {
4195 HashPath *hpath = (HashPath *) path;
4196 JoinPath *jpath = (JoinPath *) hpath;
4197
4202 new_path = (Path *) hpath;
4203 }
4204 break;
4205
4206 case T_AppendPath:
4207 {
4208 AppendPath *apath = (AppendPath *) path;
4209
4211 new_path = (Path *) apath;
4212 }
4213 break;
4214
4215 case T_MaterialPath:
4216 {
4217 MaterialPath *mpath = (MaterialPath *) path;
4218
4220 new_path = (Path *) mpath;
4221 }
4222 break;
4223
4224 case T_MemoizePath:
4225 {
4226 MemoizePath *mpath = (MemoizePath *) path;
4227
4230 new_path = (Path *) mpath;
4231 }
4232 break;
4233
4234 case T_GatherPath:
4235 {
4236 GatherPath *gpath = (GatherPath *) path;
4237
4239 new_path = (Path *) gpath;
4240 }
4241 break;
4242
4243 default:
4244 /* We don't know how to reparameterize this path. */
4245 return NULL;
4246 }
4247
4248 /*
4249 * Adjust the parameterization information, which refers to the topmost
4250 * parent. The topmost parent can be multiple levels away from the given
4251 * child, hence use multi-level expression adjustment routines.
4252 */
4253 old_ppi = new_path->param_info;
4254 required_outer =
4255 adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4256 child_rel,
4257 child_rel->top_parent);
4258
4259 /* If we already have a PPI for this parameterization, just return it */
4260 new_ppi = find_param_path_info(new_path->parent, required_outer);
4261
4262 /*
4263 * If not, build a new one and link it to the list of PPIs. For the same
4264 * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4265 * context the given RelOptInfo is in.
4266 */
4267 if (new_ppi == NULL)
4268 {
4269 MemoryContext oldcontext;
4270 RelOptInfo *rel = path->parent;
4271
4272 oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4273
4274 new_ppi = makeNode(ParamPathInfo);
4275 new_ppi->ppi_req_outer = bms_copy(required_outer);
4276 new_ppi->ppi_rows = old_ppi->ppi_rows;
4277 new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4278 ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4279 new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4280 rel->ppilist = lappend(rel->ppilist, new_ppi);
4281
4282 MemoryContextSwitchTo(oldcontext);
4283 }
4284 bms_free(required_outer);
4285
4286 new_path->param_info = new_ppi;
4287
4288 /*
4289 * Adjust the path target if the parent of the outer relation is
4290 * referenced in the targetlist. This can happen when only the parent of
4291 * outer relation is laterally referenced in this relation.
4292 */
4293 if (bms_overlap(path->parent->lateral_relids,
4294 child_rel->top_parent_relids))
4295 {
4296 new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4297 ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4298 }
4299
4300 return new_path;
4301}

References ADJUST_CHILD_ATTRS, adjust_child_relids_multilevel(), Assert(), BitmapHeapPath::bitmapqual, BitmapAndPath::bitmapquals, BitmapOrPath::bitmapquals, bms_copy(), bms_free(), bms_overlap(), copy_pathtarget(), CustomPath::custom_paths, CustomPath::custom_private, CustomPath::custom_restrictinfo, ForeignPath::fdw_outerpath, ForeignPath::fdw_private, ForeignPath::fdw_restrictinfo, find_param_path_info(), GetMemoryChunkContext(), IndexPath::indexclauses, IndexPath::indexinfo, IndexOptInfo::indrestrictinfo, JoinPath::innerjoinpath, JoinPath::joinrestrictinfo, lappend(), makeNode, MemoryContextSwitchTo(), CustomPath::methods, nodeTag, JoinPath::outerjoinpath, MemoizePath::param_exprs, BitmapHeapPath::path, ForeignPath::path, CustomPath::path, HashPath::path_hashclauses, MergePath::path_mergeclauses, PATH_REQ_OUTER, Path::pathtype, planner_rt_fetch, ParamPathInfo::ppi_clauses, ParamPathInfo::ppi_req_outer, ParamPathInfo::ppi_rows, ParamPathInfo::ppi_serials, RelOptInfo::ppilist, REPARAMETERIZE_CHILD_PATH, REPARAMETERIZE_CHILD_PATH_LIST, CustomPathMethods::ReparameterizeCustomPathByChild, root, RTE_RELATION, RangeTblEntry::rtekind, MaterialPath::subpath, MemoizePath::subpath, GatherPath::subpath, AppendPath::subpaths, RangeTblEntry::tablesample, and RelOptInfo::top_parent_relids.

Referenced by create_nestloop_plan(), and reparameterize_pathlist_by_child().
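
For ForeignPath and CustomPath nodes, translating the provider-private state is delegated to the FDW or custom-scan provider through the ReparameterizeForeignPathByChild and ReparameterizeCustomPathByChild callbacks. As a rough illustration of the FDW side of that handoff, the sketch below shows a minimal callback; the function name my_reparameterize_foreign_path_by_child and its copy-only body are hypothetical and not part of any shipped FDW, and a real implementation would have to rewrite any parent-relation Vars stored in fdw_private (e.g. with adjust_appendrel_attrs_multilevel()).

#include "postgres.h"

#include "foreign/fdwapi.h"
#include "nodes/pathnodes.h"

/*
 * Hypothetical FdwRoutine->ReparameterizeForeignPathByChild callback.
 * reparameterize_path_by_child() calls this when a ForeignPath built for
 * the top parent relation is being translated for one child relation.
 */
static List *
my_reparameterize_foreign_path_by_child(PlannerInfo *root,
                                        List *fdw_private,
                                        RelOptInfo *child_rel)
{
    /*
     * Assumes fdw_private carries no Vars referring to the parent rel, so
     * handing back a copy is enough; otherwise the stored expressions
     * would need to be rewritten to reference child_rel.
     */
    return list_copy(fdw_private);
}

If an FDW leaves ReparameterizeForeignPathByChild set to NULL, reparameterize_path_by_child() keeps fdw_private unchanged, as the if (rfpc_func) test in the listing shows.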

◆ set_cheapest()

void set_cheapest ( RelOptInfo * parent_rel )

Definition at line 270 of file pathnode.c.

271{
272 Path *cheapest_startup_path;
273 Path *cheapest_total_path;
274 Path *best_param_path;
275 List *parameterized_paths;
276 ListCell *p;
277
278 Assert(IsA(parent_rel, RelOptInfo));
279
280 if (parent_rel->pathlist == NIL)
281 elog(ERROR, "could not devise a query plan for the given query");
282
283 cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
284 parameterized_paths = NIL;
285
286 foreach(p, parent_rel->pathlist)
287 {
288 Path *path = (Path *) lfirst(p);
289 int cmp;
290
291 if (path->param_info)
292 {
293 /* Parameterized path, so add it to parameterized_paths */
294 parameterized_paths = lappend(parameterized_paths, path);
295
296 /*
297 * If we have an unparameterized cheapest-total, we no longer care
298 * about finding the best parameterized path, so move on.
299 */
300 if (cheapest_total_path)
301 continue;
302
303 /*
304 * Otherwise, track the best parameterized path, which is the one
305 * with least total cost among those of the minimum
306 * parameterization.
307 */
308 if (best_param_path == NULL)
309 best_param_path = path;
310 else
311 {
312 switch (bms_subset_compare(PATH_REQ_OUTER(path),
313 PATH_REQ_OUTER(best_param_path)))
314 {
315 case BMS_EQUAL:
316 /* keep the cheaper one */
317 if (compare_path_costs(path, best_param_path,
318 TOTAL_COST) < 0)
319 best_param_path = path;
320 break;
321 case BMS_SUBSET1:
322 /* new path is less-parameterized */
323 best_param_path = path;
324 break;
325 case BMS_SUBSET2:
326 /* old path is less-parameterized, keep it */
327 break;
328 case BMS_DIFFERENT:
329
330 /*
331 * This means that neither path has the least possible
332 * parameterization for the rel. We'll sit on the old
333 * path until something better comes along.
334 */
335 break;
336 }
337 }
338 }
339 else
340 {
341 /* Unparameterized path, so consider it for cheapest slots */
342 if (cheapest_total_path == NULL)
343 {
344 cheapest_startup_path = cheapest_total_path = path;
345 continue;
346 }
347
348 /*
349 * If we find two paths of identical costs, try to keep the
350 * better-sorted one. The paths might have unrelated sort
351 * orderings, in which case we can only guess which might be
352 * better to keep, but if one is superior then we definitely
353 * should keep that one.
354 */
355 cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
356 if (cmp > 0 ||
357 (cmp == 0 &&
358 compare_pathkeys(cheapest_startup_path->pathkeys,
359 path->pathkeys) == PATHKEYS_BETTER2))
360 cheapest_startup_path = path;
361
362 cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
363 if (cmp > 0 ||
364 (cmp == 0 &&
365 compare_pathkeys(cheapest_total_path->pathkeys,
366 path->pathkeys) == PATHKEYS_BETTER2))
367 cheapest_total_path = path;
368 }
369 }
370
371 /* Add cheapest unparameterized path, if any, to parameterized_paths */
372 if (cheapest_total_path)
373 parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
374
375 /*
376 * If there is no unparameterized path, use the best parameterized path as
377 * cheapest_total_path (but not as cheapest_startup_path).
378 */
379 if (cheapest_total_path == NULL)
380 cheapest_total_path = best_param_path;
381 Assert(cheapest_total_path != NULL);
382
383 parent_rel->cheapest_startup_path = cheapest_startup_path;
384 parent_rel->cheapest_total_path = cheapest_total_path;
385 parent_rel->cheapest_parameterized_paths = parameterized_paths;
386}

References Assert(), BMS_DIFFERENT, BMS_EQUAL, BMS_SUBSET1, BMS_SUBSET2, bms_subset_compare(), RelOptInfo::cheapest_parameterized_paths, RelOptInfo::cheapest_startup_path, RelOptInfo::cheapest_total_path, cmp(), compare_path_costs(), compare_pathkeys(), elog, ERROR, IsA, lappend(), lcons(), lfirst, NIL, PATH_REQ_OUTER, Path::pathkeys, PATHKEYS_BETTER2, RelOptInfo::pathlist, STARTUP_COST, and TOTAL_COST.

Referenced by apply_scanjoin_target_to_paths(), create_distinct_paths(), create_grouping_paths(), create_ordinary_grouping_paths(), create_partial_distinct_paths(), create_partial_unique_paths(), create_partitionwise_grouping_paths(), create_unique_paths(), create_window_paths(), generate_partitionwise_join_paths(), mark_dummy_rel(), merge_clump(), postprocess_setop_rel(), query_planner(), set_dummy_rel_pathlist(), set_grouped_rel_pathlist(), set_rel_pathlist(), standard_join_search(), and subquery_planner().
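
As the callers above suggest, the usual pattern is to add every candidate path for a relation first and then call set_cheapest() exactly once. The fragment below is a minimal sketch of that pattern, loosely modeled on the plain-relation case in the planner; build_paths_for_rel is a made-up name, and only a sequential-scan candidate is shown.

#include "postgres.h"

#include "optimizer/pathnode.h"

/* Minimal sketch: populate rel->pathlist, then pick the cheapest paths. */
static void
build_paths_for_rel(PlannerInfo *root, RelOptInfo *rel)
{
    /* An unparameterized sequential scan is always a legal candidate. */
    add_path(rel, create_seqscan_path(root, rel, rel->lateral_relids, 0));

    /* ... index, bitmap, TID, etc. candidates would be added here ... */

    /*
     * Fill rel->cheapest_startup_path, rel->cheapest_total_path and
     * rel->cheapest_parameterized_paths from everything added above.
     */
    set_cheapest(rel);
}

Note that set_cheapest() raises "could not devise a query plan for the given query" if the pathlist is empty, so at least one unconditionally valid path must have been added before it is called.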

◆ setup_simple_rel_arrays()

void setup_simple_rel_arrays ( PlannerInfo * root )

Definition at line 108 of file relnode.c.

109{
110 int size;
111 Index rti;
112 ListCell *lc;
113
114 /* Arrays are accessed using RT indexes (1..N) */
115 size = list_length(root->parse->rtable) + 1;
116 root->simple_rel_array_size = size;
117
118 /*
119 * simple_rel_array is initialized to all NULLs, since no RelOptInfos
120 * exist yet. It'll be filled by later calls to build_simple_rel().
121 */
122 root->simple_rel_array = (RelOptInfo **)
123 palloc0(size * sizeof(RelOptInfo *));
124
125 /* simple_rte_array is an array equivalent of the rtable list */
126 root->simple_rte_array = (RangeTblEntry **)
127 palloc0(size * sizeof(RangeTblEntry *));
128 rti = 1;
129 foreach(lc, root->parse->rtable)
130 {
131 RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
132
133 root->simple_rte_array[rti++] = rte;
134 }
135
136 /* append_rel_array is not needed if there are no AppendRelInfos */
137 if (root->append_rel_list == NIL)
138 {
139 root->append_rel_array = NULL;
140 return;
141 }
142
143 root->append_rel_array = (AppendRelInfo **)
144 palloc0(size * sizeof(AppendRelInfo *));
145
146 /*
147 * append_rel_array is filled with any already-existing AppendRelInfos,
148 * which currently could only come from UNION ALL flattening. We might
149 * add more later during inheritance expansion, but it's the
150 * responsibility of the expansion code to update the array properly.
151 */
152 foreach(lc, root->append_rel_list)
153 {
154 AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
155 int child_relid = appinfo->child_relid;
156
157 /* Sanity check */
158 Assert(child_relid < size);
159
160 if (root->append_rel_array[child_relid])
161 elog(ERROR, "child relation already exists");
162
163 root->append_rel_array[child_relid] = appinfo;
164 }
165}

References Assert(), AppendRelInfo::child_relid, elog, ERROR, lfirst, lfirst_node, list_length(), NIL, palloc0(), and root.

Referenced by plan_cluster_use_sort(), plan_create_index_workers(), plan_set_operations(), and query_planner().
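
Both arrays built here are addressed by range-table index, so slot 0 is unused and valid indexes run from 1 through list_length(root->parse->rtable); simple_rel_array stays NULL-filled until build_simple_rel() creates the per-relation entries. The loop below is only an illustration of that layout; show_simple_arrays is a hypothetical helper, not planner code.

#include "postgres.h"

#include "nodes/pathnodes.h"

/* Illustrative only: walk the per-RTE planner arrays after setup. */
static void
show_simple_arrays(PlannerInfo *root)
{
    for (int rti = 1; rti < root->simple_rel_array_size; rti++)
    {
        RangeTblEntry *rte = root->simple_rte_array[rti];
        RelOptInfo *rel = root->simple_rel_array[rti];

        /* rel stays NULL until build_simple_rel() has been called for rti */
        if (rel != NULL)
            elog(DEBUG1, "rti %d: rtekind %d, %.0f estimated rows",
                 rti, (int) rte->rtekind, rel->rows);
    }
}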