diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index e8cf0795b68..96271e36d5c 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -6046,6 +6046,15 @@ DistributedBy: DISTRIBUTED BY '(' distributed_by_list ')'
 					distributedBy->keyCols = NIL;
 					$$ = (Node *)distributedBy;
 				}
+			| DISTRIBUTED LOCAL
+				{
+					DistributedBy *distributedBy = makeNode(DistributedBy);
+
+					distributedBy->ptype = POLICYTYPE_LOCAL;
+					distributedBy->numsegments = -1;
+					distributedBy->keyCols = NIL;
+					$$ = (Node *)distributedBy;
+				}
 		;
 
 OptDistributedBy: DistributedBy
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 16d43c82a93..bb7db44b55b 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -2417,6 +2417,9 @@ transformDistributedBy(ParseState *pstate,
 	ListCell   *lc;
 	int			numsegments;
 
+	if (distributedBy->ptype == POLICYTYPE_LOCAL)
+		return distributedBy;
+
 	/*
 	 * utility mode creates can't have a policy. Only the QD can have policies
 	 */
@@ -3134,6 +3137,9 @@ getPolicyForDistributedBy(DistributedBy *distributedBy, TupleDesc tupdesc)
 
 		case POLICYTYPE_REPLICATED:
 			return createReplicatedGpPolicy(distributedBy->numsegments);
+
+		case POLICYTYPE_LOCAL:
+			return NULL;
 	}
 	elog(ERROR, "unrecognized policy type %d", distributedBy->ptype);
 	return NULL;
diff --git a/src/include/catalog/gp_distribution_policy.h b/src/include/catalog/gp_distribution_policy.h
index fdb864e6813..a8dc5e8883d 100644
--- a/src/include/catalog/gp_distribution_policy.h
+++ b/src/include/catalog/gp_distribution_policy.h
@@ -86,7 +86,8 @@ typedef enum GpPolicyType
 {
 	POLICYTYPE_PARTITIONED,		/* Tuples partitioned onto segment database. */
 	POLICYTYPE_ENTRY,			/* Tuples stored on entry database. */
-	POLICYTYPE_REPLICATED		/* Tuples stored a copy on all segment database. */
+	POLICYTYPE_REPLICATED,		/* Tuples stored a copy on all segment database. */
+	POLICYTYPE_LOCAL			/* Tuples stored only on the coordinator. */
 } GpPolicyType;
 
 /*
diff --git a/src/test/regress/expected/local_distribution.out b/src/test/regress/expected/local_distribution.out
new file mode 100644
index 00000000000..480a2bba8a4
--- /dev/null
+++ b/src/test/regress/expected/local_distribution.out
@@ -0,0 +1,9 @@
+-- Create a table stored only on the coordinator, without distributing it
+CREATE TABLE local_table (asd Int) DISTRIBUTED LOCAL;
+EXPLAIN SELECT * FROM local_table;
+                           QUERY PLAN
+----------------------------------------------------------------
+ Seq Scan on local_table  (cost=0.00..106.30 rows=9630 width=4)
+ Optimizer: Postgres query optimizer
+(2 rows)
+
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 34f917041d8..2216345fa45 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -4,6 +4,7 @@
 # By convention, we put no more than twenty tests in any one parallel group;
 # this limits the number of connections needed to run the tests.
 # ----------
+test: local_distribution
 
 # run tablespace by itself, and first, because it forces a checkpoint;
 # we'd prefer not to have checkpoints later in the tests because that
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 41f4d4a4e61..d402c39e524 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -1,5 +1,6 @@
 # src/test/regress/serial_schedule
 # This should probably be in an order similar to parallel_schedule.
+test: local_distribution
 test: tablespace
 test: boolean
 test: char
diff --git a/src/test/regress/sql/local_distribution.sql b/src/test/regress/sql/local_distribution.sql
new file mode 100644
index 00000000000..05bab694a06
--- /dev/null
+++ b/src/test/regress/sql/local_distribution.sql
@@ -0,0 +1,5 @@
+-- Create a table stored only on the coordinator, without distributing it
+
+CREATE TABLE local_table (asd Int) DISTRIBUTED LOCAL;
+
+EXPLAIN SELECT * FROM local_table;
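
For context, a minimal usage sketch of the clause this patch adds. The table name is hypothetical, and the catalog check reflects an expectation (getPolicyForDistributedBy() returns NULL for POLICYTYPE_LOCAL, so no gp_distribution_policy row should be written); it is not captured output from the regression test above.

-- Hypothetical example, not part of the patch's regression test
CREATE TABLE coord_only_metrics (id int, note text) DISTRIBUTED LOCAL;

-- Since getPolicyForDistributedBy() returns NULL for POLICYTYPE_LOCAL,
-- no distribution policy row is expected for this table
-- (assuming the catalog's localoid column as the lookup key):
SELECT * FROM gp_distribution_policy WHERE localoid = 'coord_only_metrics'::regclass;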